1 /*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * Written and Maintained by:
35 * Manoj Malviya (manojmalviya@chelsio.com)
36 * Atul Gupta (atul.gupta@chelsio.com)
37 * Jitendra Lulla (jlulla@chelsio.com)
38 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39 * Harsh Jain (harsh@chelsio.com)
40 */
41
42 #define pr_fmt(fmt) "chcr:" fmt
43
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/gcm.h>
57 #include <crypto/sha.h>
58 #include <crypto/authenc.h>
59 #include <crypto/ctr.h>
60 #include <crypto/gf128mul.h>
61 #include <crypto/internal/aead.h>
62 #include <crypto/null.h>
63 #include <crypto/internal/skcipher.h>
64 #include <crypto/aead.h>
65 #include <crypto/scatterwalk.h>
66 #include <crypto/internal/hash.h>
67
68 #include "t4fw_api.h"
69 #include "t4_msg.h"
70 #include "chcr_core.h"
71 #include "chcr_algo.h"
72 #include "chcr_crypto.h"
73
74 #define IV AES_BLOCK_SIZE
75
76 static unsigned int sgl_ent_len[] = {
77 0, 0, 16, 24, 40, 48, 64, 72, 88,
78 96, 112, 120, 136, 144, 160, 168, 184,
79 192, 208, 216, 232, 240, 256, 264, 280,
80 288, 304, 312, 328, 336, 352, 360, 376
81 };
82
83 static unsigned int dsgl_ent_len[] = {
84 0, 32, 32, 48, 48, 64, 64, 80, 80,
85 112, 112, 128, 128, 144, 144, 160, 160,
86 192, 192, 208, 208, 224, 224, 240, 240,
87 272, 272, 288, 288, 304, 304, 320, 320
88 };
89
90 static u32 round_constant[11] = {
91 0x01000000, 0x02000000, 0x04000000, 0x08000000,
92 0x10000000, 0x20000000, 0x40000000, 0x80000000,
93 0x1B000000, 0x36000000, 0x6C000000
94 };
95
96 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
97 unsigned char *input, int err);
98
99 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
100 {
101 return ctx->crypto_ctx->aeadctx;
102 }
103
104 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
105 {
106 return ctx->crypto_ctx->ablkctx;
107 }
108
109 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
110 {
111 return ctx->crypto_ctx->hmacctx;
112 }
113
114 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
115 {
116 return gctx->ctx->gcm;
117 }
118
119 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
120 {
121 return gctx->ctx->authenc;
122 }
123
124 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
125 {
126 return container_of(ctx->dev, struct uld_ctx, dev);
127 }
128
129 static inline int is_ofld_imm(const struct sk_buff *skb)
130 {
131 return (skb->len <= SGE_MAX_WR_LEN);
132 }
133
134 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
135 {
136 memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
137 }
138
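/* Count the SGL entries of at most @entlen bytes needed to cover @reqlen
 * bytes of @sg after skipping the first @skip bytes.
 */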
139 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
140 unsigned int entlen,
141 unsigned int skip)
142 {
143 int nents = 0;
144 unsigned int less;
145 unsigned int skip_len = 0;
146
147 while (sg && skip) {
148 if (sg_dma_len(sg) <= skip) {
149 skip -= sg_dma_len(sg);
150 skip_len = 0;
151 sg = sg_next(sg);
152 } else {
153 skip_len = skip;
154 skip = 0;
155 }
156 }
157
158 while (sg && reqlen) {
159 less = min(reqlen, sg_dma_len(sg) - skip_len);
160 nents += DIV_ROUND_UP(less, entlen);
161 reqlen -= less;
162 skip_len = 0;
163 sg = sg_next(sg);
164 }
165 return nents;
166 }
167
168 static inline int get_aead_subtype(struct crypto_aead *aead)
169 {
170 struct aead_alg *alg = crypto_aead_alg(aead);
171 struct chcr_alg_template *chcr_crypto_alg =
172 container_of(alg, struct chcr_alg_template, alg.aead);
173 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
174 }
175
176 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
177 {
178 u8 temp[SHA512_DIGEST_SIZE];
179 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
180 int authsize = crypto_aead_authsize(tfm);
181 struct cpl_fw6_pld *fw6_pld;
182 int cmp = 0;
183
184 fw6_pld = (struct cpl_fw6_pld *)input;
185 if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
186 (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
187 cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
188 } else {
189
190 sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
191 authsize, req->assoclen +
192 req->cryptlen - authsize);
193 cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
194 }
195 if (cmp)
196 *err = -EBADMSG;
197 else
198 *err = 0;
199 }
200
201 static int chcr_inc_wrcount(struct chcr_dev *dev)
202 {
203 int err = 0;
204
205 spin_lock_bh(&dev->lock_chcr_dev);
206 if (dev->state == CHCR_DETACH)
207 err = 1;
208 else
209 atomic_inc(&dev->inflight);
210
211 spin_unlock_bh(&dev->lock_chcr_dev);
212
213 return err;
214 }
215
216 static inline void chcr_dec_wrcount(struct chcr_dev *dev)
217 {
218 atomic_dec(&dev->inflight);
219 }
220
221 static inline int chcr_handle_aead_resp(struct aead_request *req,
222 unsigned char *input,
223 int err)
224 {
225 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
226 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
227 struct chcr_dev *dev = a_ctx(tfm)->dev;
228
229 chcr_aead_common_exit(req);
230 if (reqctx->verify == VERIFY_SW) {
231 chcr_verify_tag(req, input, &err);
232 reqctx->verify = VERIFY_HW;
233 }
234 chcr_dec_wrcount(dev);
235 req->base.complete(&req->base, err);
236
237 return err;
238 }
239
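/* Expand the AES key schedule and store the final round-key words in
 * reverse order (big-endian); the hardware uses this as the reverse-round
 * key for decryption.
 */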
240 static void get_aes_decrypt_key(unsigned char *dec_key,
241 const unsigned char *key,
242 unsigned int keylength)
243 {
244 u32 temp;
245 u32 w_ring[MAX_NK];
246 int i, j, k;
247 u8 nr, nk;
248
249 switch (keylength) {
250 case AES_KEYLENGTH_128BIT:
251 nk = KEYLENGTH_4BYTES;
252 nr = NUMBER_OF_ROUNDS_10;
253 break;
254 case AES_KEYLENGTH_192BIT:
255 nk = KEYLENGTH_6BYTES;
256 nr = NUMBER_OF_ROUNDS_12;
257 break;
258 case AES_KEYLENGTH_256BIT:
259 nk = KEYLENGTH_8BYTES;
260 nr = NUMBER_OF_ROUNDS_14;
261 break;
262 default:
263 return;
264 }
265 for (i = 0; i < nk; i++)
266 w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
267
268 i = 0;
269 temp = w_ring[nk - 1];
270 while (i + nk < (nr + 1) * 4) {
271 if (!(i % nk)) {
272 /* RotWord(temp) */
273 temp = (temp << 8) | (temp >> 24);
274 temp = aes_ks_subword(temp);
275 temp ^= round_constant[i / nk];
276 } else if (nk == 8 && (i % 4 == 0)) {
277 temp = aes_ks_subword(temp);
278 }
279 w_ring[i % nk] ^= temp;
280 temp = w_ring[i % nk];
281 i++;
282 }
283 i--;
284 for (k = 0, j = i % nk; k < nk; k++) {
285 *((u32 *)dec_key + k) = htonl(w_ring[j]);
286 j--;
287 if (j < 0)
288 j += nk;
289 }
290 }
291
292 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
293 {
294 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
295
296 switch (ds) {
297 case SHA1_DIGEST_SIZE:
298 base_hash = crypto_alloc_shash("sha1", 0, 0);
299 break;
300 case SHA224_DIGEST_SIZE:
301 base_hash = crypto_alloc_shash("sha224", 0, 0);
302 break;
303 case SHA256_DIGEST_SIZE:
304 base_hash = crypto_alloc_shash("sha256", 0, 0);
305 break;
306 case SHA384_DIGEST_SIZE:
307 base_hash = crypto_alloc_shash("sha384", 0, 0);
308 break;
309 case SHA512_DIGEST_SIZE:
310 base_hash = crypto_alloc_shash("sha512", 0, 0);
311 break;
312 }
313
314 return base_hash;
315 }
316
317 static int chcr_compute_partial_hash(struct shash_desc *desc,
318 char *iopad, char *result_hash,
319 int digest_size)
320 {
321 struct sha1_state sha1_st;
322 struct sha256_state sha256_st;
323 struct sha512_state sha512_st;
324 int error;
325
326 if (digest_size == SHA1_DIGEST_SIZE) {
327 error = crypto_shash_init(desc) ?:
328 crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
329 crypto_shash_export(desc, (void *)&sha1_st);
330 memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
331 } else if (digest_size == SHA224_DIGEST_SIZE) {
332 error = crypto_shash_init(desc) ?:
333 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
334 crypto_shash_export(desc, (void *)&sha256_st);
335 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
336
337 } else if (digest_size == SHA256_DIGEST_SIZE) {
338 error = crypto_shash_init(desc) ?:
339 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
340 crypto_shash_export(desc, (void *)&sha256_st);
341 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
342
343 } else if (digest_size == SHA384_DIGEST_SIZE) {
344 error = crypto_shash_init(desc) ?:
345 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
346 crypto_shash_export(desc, (void *)&sha512_st);
347 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
348
349 } else if (digest_size == SHA512_DIGEST_SIZE) {
350 error = crypto_shash_init(desc) ?:
351 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
352 crypto_shash_export(desc, (void *)&sha512_st);
353 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
354 } else {
355 error = -EINVAL;
356 pr_err("Unknown digest size %d\n", digest_size);
357 }
358 return error;
359 }
360
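/* Convert the exported partial-hash state to big-endian (64-bit words for
 * SHA-384/512, 32-bit words otherwise) before it is loaded into the key
 * context.
 */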
361 static void chcr_change_order(char *buf, int ds)
362 {
363 int i;
364
365 if (ds == SHA512_DIGEST_SIZE) {
366 for (i = 0; i < (ds / sizeof(u64)); i++)
367 *((__be64 *)buf + i) =
368 cpu_to_be64(*((u64 *)buf + i));
369 } else {
370 for (i = 0; i < (ds / sizeof(u32)); i++)
371 *((__be32 *)buf + i) =
372 cpu_to_be32(*((u32 *)buf + i));
373 }
374 }
375
376 static inline int is_hmac(struct crypto_tfm *tfm)
377 {
378 struct crypto_alg *alg = tfm->__crt_alg;
379 struct chcr_alg_template *chcr_crypto_alg =
380 container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
381 alg.hash);
382 if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
383 return 1;
384 return 0;
385 }
386
387 static inline void dsgl_walk_init(struct dsgl_walk *walk,
388 struct cpl_rx_phys_dsgl *dsgl)
389 {
390 walk->dsgl = dsgl;
391 walk->nents = 0;
392 walk->to = (struct phys_sge_pairs *)(dsgl + 1);
393 }
394
395 static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
396 int pci_chan_id)
397 {
398 struct cpl_rx_phys_dsgl *phys_cpl;
399
400 phys_cpl = walk->dsgl;
401
402 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
403 | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
404 phys_cpl->pcirlxorder_to_noofsgentr =
405 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
406 CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
407 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
408 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
409 CPL_RX_PHYS_DSGL_DCAID_V(0) |
410 CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
411 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
412 phys_cpl->rss_hdr_int.qid = htons(qid);
413 phys_cpl->rss_hdr_int.hash_val = 0;
414 phys_cpl->rss_hdr_int.channel = pci_chan_id;
415 }
416
417 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
418 size_t size,
419 dma_addr_t addr)
420 {
421 int j;
422
423 if (!size)
424 return;
425 j = walk->nents;
426 walk->to->len[j % 8] = htons(size);
427 walk->to->addr[j % 8] = cpu_to_be64(addr);
428 j++;
429 if ((j % 8) == 0)
430 walk->to++;
431 walk->nents = j;
432 }
433
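/* Append a DMA-mapped scatterlist to the destination PHYS_DSGL: skip the
 * first @skip bytes, split entries larger than CHCR_DST_SG_SIZE and record
 * the last entry used so the next WR can resume from it.
 */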
434 static void dsgl_walk_add_sg(struct dsgl_walk *walk,
435 struct scatterlist *sg,
436 unsigned int slen,
437 unsigned int skip)
438 {
439 int skip_len = 0;
440 unsigned int left_size = slen, len = 0;
441 unsigned int j = walk->nents;
442 int offset, ent_len;
443
444 if (!slen)
445 return;
446 while (sg && skip) {
447 if (sg_dma_len(sg) <= skip) {
448 skip -= sg_dma_len(sg);
449 skip_len = 0;
450 sg = sg_next(sg);
451 } else {
452 skip_len = skip;
453 skip = 0;
454 }
455 }
456
457 while (left_size && sg) {
458 len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
459 offset = 0;
460 while (len) {
461 ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
462 walk->to->len[j % 8] = htons(ent_len);
463 walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
464 offset + skip_len);
465 offset += ent_len;
466 len -= ent_len;
467 j++;
468 if ((j % 8) == 0)
469 walk->to++;
470 }
471 walk->last_sg = sg;
472 walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
473 skip_len) + skip_len;
474 left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
475 skip_len = 0;
476 sg = sg_next(sg);
477 }
478 walk->nents = j;
479 }
480
481 static inline void ulptx_walk_init(struct ulptx_walk *walk,
482 struct ulptx_sgl *ulp)
483 {
484 walk->sgl = ulp;
485 walk->nents = 0;
486 walk->pair_idx = 0;
487 walk->pair = ulp->sge;
488 walk->last_sg = NULL;
489 walk->last_sg_len = 0;
490 }
491
492 static inline void ulptx_walk_end(struct ulptx_walk *walk)
493 {
494 walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
495 ULPTX_NSGE_V(walk->nents));
496 }
497
498
499 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
500 size_t size,
501 dma_addr_t addr)
502 {
503 if (!size)
504 return;
505
506 if (walk->nents == 0) {
507 walk->sgl->len0 = cpu_to_be32(size);
508 walk->sgl->addr0 = cpu_to_be64(addr);
509 } else {
510 walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
511 walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
512 walk->pair_idx = !walk->pair_idx;
513 if (!walk->pair_idx)
514 walk->pair++;
515 }
516 walk->nents++;
517 }
518
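/* Append a DMA-mapped scatterlist to the source ULPTX SGL: skip the first
 * @skip bytes and cap each entry at CHCR_SRC_SG_SIZE; the first entry goes
 * into len0/addr0, the rest into the address/length pairs.
 */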
519 static void ulptx_walk_add_sg(struct ulptx_walk *walk,
520 struct scatterlist *sg,
521 unsigned int len,
522 unsigned int skip)
523 {
524 int small;
525 int skip_len = 0;
526 unsigned int sgmin;
527
528 if (!len)
529 return;
530 while (sg && skip) {
531 if (sg_dma_len(sg) <= skip) {
532 skip -= sg_dma_len(sg);
533 skip_len = 0;
534 sg = sg_next(sg);
535 } else {
536 skip_len = skip;
537 skip = 0;
538 }
539 }
540 WARN(!sg, "SG should not be null here\n");
541 if (sg && (walk->nents == 0)) {
542 small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
543 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
544 walk->sgl->len0 = cpu_to_be32(sgmin);
545 walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
546 walk->nents++;
547 len -= sgmin;
548 walk->last_sg = sg;
549 walk->last_sg_len = sgmin + skip_len;
550 skip_len += sgmin;
551 if (sg_dma_len(sg) == skip_len) {
552 sg = sg_next(sg);
553 skip_len = 0;
554 }
555 }
556
557 while (sg && len) {
558 small = min(sg_dma_len(sg) - skip_len, len);
559 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
560 walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
561 walk->pair->addr[walk->pair_idx] =
562 cpu_to_be64(sg_dma_address(sg) + skip_len);
563 walk->pair_idx = !walk->pair_idx;
564 walk->nents++;
565 if (!walk->pair_idx)
566 walk->pair++;
567 len -= sgmin;
568 skip_len += sgmin;
569 walk->last_sg = sg;
570 walk->last_sg_len = skip_len;
571 if (sg_dma_len(sg) == skip_len) {
572 sg = sg_next(sg);
573 skip_len = 0;
574 }
575 }
576 }
577
578 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
579 {
580 struct crypto_alg *alg = tfm->__crt_alg;
581 struct chcr_alg_template *chcr_crypto_alg =
582 container_of(alg, struct chcr_alg_template, alg.crypto);
583
584 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
585 }
586
587 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
588 {
589 struct adapter *adap = netdev2adap(dev);
590 struct sge_uld_txq_info *txq_info =
591 adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
592 struct sge_uld_txq *txq;
593 int ret = 0;
594
595 local_bh_disable();
596 txq = &txq_info->uldtxq[idx];
597 spin_lock(&txq->sendq.lock);
598 if (txq->full)
599 ret = -1;
600 spin_unlock(&txq->sendq.lock);
601 local_bh_enable();
602 return ret;
603 }
604
605 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
606 struct _key_ctx *key_ctx)
607 {
608 if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
609 memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
610 } else {
611 memcpy(key_ctx->key,
612 ablkctx->key + (ablkctx->enckey_len >> 1),
613 ablkctx->enckey_len >> 1);
614 memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
615 ablkctx->rrkey, ablkctx->enckey_len >> 1);
616 }
617 return 0;
618 }
619
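/* Return how many source bytes can be added to this WR without the source
 * SGL exceeding the remaining @space; per-entry SGL overhead is taken from
 * sgl_ent_len[].
 */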
620 static int chcr_hash_ent_in_wr(struct scatterlist *src,
621 unsigned int minsg,
622 unsigned int space,
623 unsigned int srcskip)
624 {
625 int srclen = 0;
626 int srcsg = minsg;
627 int soffset = 0, sless;
628
629 if (sg_dma_len(src) == srcskip) {
630 src = sg_next(src);
631 srcskip = 0;
632 }
633 while (src && space > (sgl_ent_len[srcsg + 1])) {
634 sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
635 CHCR_SRC_SG_SIZE);
636 srclen += sless;
637 soffset += sless;
638 srcsg++;
639 if (sg_dma_len(src) == (soffset + srcskip)) {
640 src = sg_next(src);
641 soffset = 0;
642 srcskip = 0;
643 }
644 }
645 return srclen;
646 }
647
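/* Return how many payload bytes fit in one WR: walk src and dst in
 * lock-step so that neither the source SGL nor the destination DSGL
 * overflows the remaining @space, and return min(srclen, dstlen).
 */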
648 static int chcr_sg_ent_in_wr(struct scatterlist *src,
649 struct scatterlist *dst,
650 unsigned int minsg,
651 unsigned int space,
652 unsigned int srcskip,
653 unsigned int dstskip)
654 {
655 int srclen = 0, dstlen = 0;
656 int srcsg = minsg, dstsg = minsg;
657 int offset = 0, soffset = 0, less, sless = 0;
658
659 if (sg_dma_len(src) == srcskip) {
660 src = sg_next(src);
661 srcskip = 0;
662 }
663 if (sg_dma_len(dst) == dstskip) {
664 dst = sg_next(dst);
665 dstskip = 0;
666 }
667
668 while (src && dst &&
669 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
670 sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
671 CHCR_SRC_SG_SIZE);
672 srclen += sless;
673 srcsg++;
674 offset = 0;
675 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
676 space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
677 if (srclen <= dstlen)
678 break;
679 less = min_t(unsigned int, sg_dma_len(dst) - offset -
680 dstskip, CHCR_DST_SG_SIZE);
681 dstlen += less;
682 offset += less;
683 if ((offset + dstskip) == sg_dma_len(dst)) {
684 dst = sg_next(dst);
685 offset = 0;
686 }
687 dstsg++;
688 dstskip = 0;
689 }
690 soffset += sless;
691 if ((soffset + srcskip) == sg_dma_len(src)) {
692 src = sg_next(src);
693 srcskip = 0;
694 soffset = 0;
695 }
696
697 }
698 return min(srclen, dstlen);
699 }
700
701 static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
702 u32 flags,
703 struct scatterlist *src,
704 struct scatterlist *dst,
705 unsigned int nbytes,
706 u8 *iv,
707 unsigned short op_type)
708 {
709 int err;
710
711 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
712
713 skcipher_request_set_sync_tfm(subreq, cipher);
714 skcipher_request_set_callback(subreq, flags, NULL, NULL);
715 skcipher_request_set_crypt(subreq, src, dst,
716 nbytes, iv);
717
718 err = op_type ? crypto_skcipher_decrypt(subreq) :
719 crypto_skcipher_encrypt(subreq);
720 skcipher_request_zero(subreq);
721
722 return err;
723
724 }
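/* Fill the FW_CRYPTO_LOOKASIDE_WR and ULPTX headers that are common to the
 * chcr work requests.
 */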
725 static inline void create_wreq(struct chcr_context *ctx,
726 struct chcr_wr *chcr_req,
727 struct crypto_async_request *req,
728 unsigned int imm,
729 int hash_sz,
730 unsigned int len16,
731 unsigned int sc_len,
732 unsigned int lcb)
733 {
734 struct uld_ctx *u_ctx = ULD_CTX(ctx);
735 int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
736
737
738 chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
739 chcr_req->wreq.pld_size_hash_size =
740 htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
741 chcr_req->wreq.len16_pkd =
742 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
743 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
744 chcr_req->wreq.rx_chid_to_rx_q_id =
745 FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
746 !!lcb, ctx->tx_qidx);
747
748 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
749 qid);
750 chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
751 ((sizeof(chcr_req->wreq)) >> 4)));
752
753 chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
754 chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
755 sizeof(chcr_req->key_ctx) + sc_len);
756 }
757
758 /**
759 * create_cipher_wr - form the WR for cipher operations
760 * @wrparam: cipher work request parameters, which carry the cipher
761 * request, the ingress qid where the response of this WR should be
762 * received, and the number of bytes to process in this WR; whether
763 * this is encryption or decryption is taken from the request context.
764 */
765 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
766 {
767 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
768 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
769 struct sk_buff *skb = NULL;
770 struct chcr_wr *chcr_req;
771 struct cpl_rx_phys_dsgl *phys_cpl;
772 struct ulptx_sgl *ulptx;
773 struct chcr_blkcipher_req_ctx *reqctx =
774 ablkcipher_request_ctx(wrparam->req);
775 unsigned int temp = 0, transhdr_len, dst_size;
776 int error;
777 int nents;
778 unsigned int kctx_len;
779 gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
780 GFP_KERNEL : GFP_ATOMIC;
781 struct adapter *adap = padap(c_ctx(tfm)->dev);
782
783 nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
784 reqctx->dst_ofst);
785 dst_size = get_space_for_phys_dsgl(nents);
786 kctx_len = roundup(ablkctx->enckey_len, 16);
787 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
788 nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
789 CHCR_SRC_SG_SIZE, reqctx->src_ofst);
790 temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
791 (sgl_len(nents) * 8);
792 transhdr_len += temp;
793 transhdr_len = roundup(transhdr_len, 16);
794 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
795 if (!skb) {
796 error = -ENOMEM;
797 goto err;
798 }
799 chcr_req = __skb_put_zero(skb, transhdr_len);
800 chcr_req->sec_cpl.op_ivinsrtofst =
801 FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
802
803 chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
804 chcr_req->sec_cpl.aadstart_cipherstop_hi =
805 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
806
807 chcr_req->sec_cpl.cipherstop_lo_authinsert =
808 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
809 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
810 ablkctx->ciph_mode,
811 0, 0, IV >> 1);
812 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
813 0, 1, dst_size);
814
815 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
816 if ((reqctx->op == CHCR_DECRYPT_OP) &&
817 (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
818 CRYPTO_ALG_SUB_TYPE_CTR)) &&
819 (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
820 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
821 generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
822 } else {
823 if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
824 (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
825 memcpy(chcr_req->key_ctx.key, ablkctx->key,
826 ablkctx->enckey_len);
827 } else {
828 memcpy(chcr_req->key_ctx.key, ablkctx->key +
829 (ablkctx->enckey_len >> 1),
830 ablkctx->enckey_len >> 1);
831 memcpy(chcr_req->key_ctx.key +
832 (ablkctx->enckey_len >> 1),
833 ablkctx->key,
834 ablkctx->enckey_len >> 1);
835 }
836 }
837 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
838 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
839 chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
840 chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
841
842 atomic_inc(&adap->chcr_stats.cipher_rqst);
843 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
844 + (reqctx->imm ? (wrparam->bytes) : 0);
845 create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
846 transhdr_len, temp,
847 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
848 reqctx->skb = skb;
849
850 if (reqctx->op && (ablkctx->ciph_mode ==
851 CHCR_SCMD_CIPHER_MODE_AES_CBC))
852 sg_pcopy_to_buffer(wrparam->req->src,
853 sg_nents(wrparam->req->src), wrparam->req->info, 16,
854 reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
855
856 return skb;
857 err:
858 return ERR_PTR(error);
859 }
860
861 static inline int chcr_keyctx_ck_size(unsigned int keylen)
862 {
863 int ck_size = 0;
864
865 if (keylen == AES_KEYSIZE_128)
866 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
867 else if (keylen == AES_KEYSIZE_192)
868 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
869 else if (keylen == AES_KEYSIZE_256)
870 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
871 else
872 ck_size = 0;
873
874 return ck_size;
875 }
876 static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
877 const u8 *key,
878 unsigned int keylen)
879 {
880 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
881 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
882 int err = 0;
883
884 crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
885 CRYPTO_TFM_REQ_MASK);
886 crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
887 cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
888 err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
889 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
890 tfm->crt_flags |=
891 crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
892 CRYPTO_TFM_RES_MASK;
893 return err;
894 }
895
896 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
897 const u8 *key,
898 unsigned int keylen)
899 {
900 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
901 unsigned int ck_size, context_size;
902 u16 alignment = 0;
903 int err;
904
905 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
906 if (err)
907 goto badkey_err;
908
909 ck_size = chcr_keyctx_ck_size(keylen);
910 alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
911 memcpy(ablkctx->key, key, keylen);
912 ablkctx->enckey_len = keylen;
913 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
914 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
915 keylen + alignment) >> 4;
916
917 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
918 0, 0, context_size);
919 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
920 return 0;
921 badkey_err:
922 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
923 ablkctx->enckey_len = 0;
924
925 return err;
926 }
927
928 static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
929 const u8 *key,
930 unsigned int keylen)
931 {
932 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
933 unsigned int ck_size, context_size;
934 u16 alignment = 0;
935 int err;
936
937 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
938 if (err)
939 goto badkey_err;
940 ck_size = chcr_keyctx_ck_size(keylen);
941 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
942 memcpy(ablkctx->key, key, keylen);
943 ablkctx->enckey_len = keylen;
944 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
945 keylen + alignment) >> 4;
946
947 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
948 0, 0, context_size);
949 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
950
951 return 0;
952 badkey_err:
953 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
954 ablkctx->enckey_len = 0;
955
956 return err;
957 }
958
959 static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
960 const u8 *key,
961 unsigned int keylen)
962 {
963 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
964 unsigned int ck_size, context_size;
965 u16 alignment = 0;
966 int err;
967
968 if (keylen < CTR_RFC3686_NONCE_SIZE)
969 return -EINVAL;
970 memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
971 CTR_RFC3686_NONCE_SIZE);
972
973 keylen -= CTR_RFC3686_NONCE_SIZE;
974 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
975 if (err)
976 goto badkey_err;
977
978 ck_size = chcr_keyctx_ck_size(keylen);
979 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
980 memcpy(ablkctx->key, key, keylen);
981 ablkctx->enckey_len = keylen;
982 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
983 keylen + alignment) >> 4;
984
985 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
986 0, 0, context_size);
987 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
988
989 return 0;
990 badkey_err:
991 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
992 ablkctx->enckey_len = 0;
993
994 return err;
995 }
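/* Copy @srciv to @dstiv and add @add to its big-endian counter, propagating
 * the carry across 32-bit words.
 */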
996 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
997 {
998 unsigned int size = AES_BLOCK_SIZE;
999 __be32 *b = (__be32 *)(dstiv + size);
1000 u32 c, prev;
1001
1002 memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1003 for (; size >= 4; size -= 4) {
1004 prev = be32_to_cpu(*--b);
1005 c = prev + add;
1006 *b = cpu_to_be32(c);
1007 if (prev < c)
1008 break;
1009 add = 1;
1010 }
1011
1012 }
1013
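/* Clamp @bytes so that the low 32-bit counter in @iv does not wrap within
 * this request; any remainder is handled in a subsequent WR.
 */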
1014 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1015 {
1016 __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1017 u64 c;
1018 u32 temp = be32_to_cpu(*--b);
1019
1020 temp = ~temp;
1021 c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
1022 if ((bytes / AES_BLOCK_SIZE) > c)
1023 bytes = c * AES_BLOCK_SIZE;
1024 return bytes;
1025 }
1026
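/* Recompute the XTS tweak for the next chunk: encrypt the original IV with
 * the second half of the key, then multiply by x in GF(2^128) once per
 * block already processed (in steps of x^8 where possible).
 */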
1027 static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1028 u32 isfinal)
1029 {
1030 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1031 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1032 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1033 struct crypto_cipher *cipher;
1034 int ret, i;
1035 u8 *key;
1036 unsigned int keylen;
1037 int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1038 int round8 = round / 8;
1039
1040 cipher = ablkctx->aes_generic;
1041 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1042
1043 keylen = ablkctx->enckey_len / 2;
1044 key = ablkctx->key + keylen;
1045 ret = crypto_cipher_setkey(cipher, key, keylen);
1046 if (ret)
1047 goto out;
1048 crypto_cipher_encrypt_one(cipher, iv, iv);
1049 for (i = 0; i < round8; i++)
1050 gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1051
1052 for (i = 0; i < (round % 8); i++)
1053 gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1054
1055 if (!isfinal)
1056 crypto_cipher_decrypt_one(cipher, iv, iv);
1057 out:
1058 return ret;
1059 }
1060
1061 static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1062 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1063 {
1064 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1065 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1066 int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1067 int ret = 0;
1068
1069 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1070 ctr_add_iv(iv, req->info, (reqctx->processed /
1071 AES_BLOCK_SIZE));
1072 else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1073 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1074 CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1075 AES_BLOCK_SIZE) + 1);
1076 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1077 ret = chcr_update_tweak(req, iv, 0);
1078 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1079 if (reqctx->op)
1080 /*Updated before sending last WR*/
1081 memcpy(iv, req->info, AES_BLOCK_SIZE);
1082 else
1083 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1084 }
1085
1086 return ret;
1087
1088 }
1089
1090 /* We need a separate function for the final IV because in RFC3686 the
1091 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
1092 * remains constant across subsequent update requests.
1093 */
1094
1095 static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1096 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1097 {
1098 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1099 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1100 int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1101 int ret = 0;
1102
1103 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1104 ctr_add_iv(iv, req->info, (reqctx->processed /
1105 AES_BLOCK_SIZE));
1106 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1107 ret = chcr_update_tweak(req, iv, 1);
1108 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1109 /*Already updated for Decrypt*/
1110 if (!reqctx->op)
1111 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1112
1113 }
1114 return ret;
1115
1116 }
1117
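/* Completion handler for a cipher WR: update the IV, then either complete
 * the request, fall back to the software cipher when no more bytes fit in a
 * WR, or send the next partial WR.
 */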
1118 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1119 unsigned char *input, int err)
1120 {
1121 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1122 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1123 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1124 struct sk_buff *skb;
1125 struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1126 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1127 struct cipher_wr_param wrparam;
1128 struct chcr_dev *dev = c_ctx(tfm)->dev;
1129 int bytes;
1130
1131 if (err)
1132 goto unmap;
1133 if (req->nbytes == reqctx->processed) {
1134 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1135 req);
1136 err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1137 goto complete;
1138 }
1139
1140 if (!reqctx->imm) {
1141 bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1142 CIP_SPACE_LEFT(ablkctx->enckey_len),
1143 reqctx->src_ofst, reqctx->dst_ofst);
1144 if ((bytes + reqctx->processed) >= req->nbytes)
1145 bytes = req->nbytes - reqctx->processed;
1146 else
1147 bytes = rounddown(bytes, 16);
1148 } else {
1149 /* CTR mode counter overflow */
1150 bytes = req->nbytes - reqctx->processed;
1151 }
1152 err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1153 if (err)
1154 goto unmap;
1155
1156 if (unlikely(bytes == 0)) {
1157 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1158 req);
1159 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1160 req->base.flags,
1161 req->src,
1162 req->dst,
1163 req->nbytes,
1164 req->info,
1165 reqctx->op);
1166 goto complete;
1167 }
1168
1169 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1170 CRYPTO_ALG_SUB_TYPE_CTR)
1171 bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1172 wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
1173 wrparam.req = req;
1174 wrparam.bytes = bytes;
1175 skb = create_cipher_wr(&wrparam);
1176 if (IS_ERR(skb)) {
1177 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1178 err = PTR_ERR(skb);
1179 goto unmap;
1180 }
1181 skb->dev = u_ctx->lldi.ports[0];
1182 set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1183 chcr_send_wr(skb);
1184 reqctx->last_req_len = bytes;
1185 reqctx->processed += bytes;
1186 return 0;
1187 unmap:
1188 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1189 complete:
1190 chcr_dec_wrcount(dev);
1191 req->base.complete(&req->base, err);
1192 return err;
1193 }
1194
1195 static int process_cipher(struct ablkcipher_request *req,
1196 unsigned short qid,
1197 struct sk_buff **skb,
1198 unsigned short op_type)
1199 {
1200 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1201 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1202 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1203 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1204 struct cipher_wr_param wrparam;
1205 int bytes, err = -EINVAL;
1206
1207 reqctx->processed = 0;
1208 if (!req->info)
1209 goto error;
1210 if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1211 (req->nbytes == 0) ||
1212 (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1213 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1214 ablkctx->enckey_len, req->nbytes, ivsize);
1215 goto error;
1216 }
1217
1218 err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1219 if (err)
1220 goto error;
1221 if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1222 AES_MIN_KEY_SIZE +
1223 sizeof(struct cpl_rx_phys_dsgl) +
1224 /*Min dsgl size*/
1225 32))) {
1226 /* Can be sent as Imm*/
1227 unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1228
1229 dnents = sg_nents_xlen(req->dst, req->nbytes,
1230 CHCR_DST_SG_SIZE, 0);
1231 phys_dsgl = get_space_for_phys_dsgl(dnents);
1232 kctx_len = roundup(ablkctx->enckey_len, 16);
1233 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1234 reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1235 SGE_MAX_WR_LEN;
1236 bytes = IV + req->nbytes;
1237
1238 } else {
1239 reqctx->imm = 0;
1240 }
1241
1242 if (!reqctx->imm) {
1243 bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1244 CIP_SPACE_LEFT(ablkctx->enckey_len),
1245 0, 0);
1246 if ((bytes + reqctx->processed) >= req->nbytes)
1247 bytes = req->nbytes - reqctx->processed;
1248 else
1249 bytes = rounddown(bytes, 16);
1250 } else {
1251 bytes = req->nbytes;
1252 }
1253 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1254 CRYPTO_ALG_SUB_TYPE_CTR) {
1255 bytes = adjust_ctr_overflow(req->info, bytes);
1256 }
1257 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1258 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1259 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1260 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1261 CTR_RFC3686_IV_SIZE);
1262
1263 /* initialize counter portion of counter block */
1264 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1265 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1266
1267 } else {
1268
1269 memcpy(reqctx->iv, req->info, IV);
1270 }
1271 if (unlikely(bytes == 0)) {
1272 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1273 req);
1274 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1275 req->base.flags,
1276 req->src,
1277 req->dst,
1278 req->nbytes,
1279 reqctx->iv,
1280 op_type);
1281 goto error;
1282 }
1283 reqctx->op = op_type;
1284 reqctx->srcsg = req->src;
1285 reqctx->dstsg = req->dst;
1286 reqctx->src_ofst = 0;
1287 reqctx->dst_ofst = 0;
1288 wrparam.qid = qid;
1289 wrparam.req = req;
1290 wrparam.bytes = bytes;
1291 *skb = create_cipher_wr(&wrparam);
1292 if (IS_ERR(*skb)) {
1293 err = PTR_ERR(*skb);
1294 goto unmap;
1295 }
1296 reqctx->processed = bytes;
1297 reqctx->last_req_len = bytes;
1298
1299 return 0;
1300 unmap:
1301 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1302 error:
1303 return err;
1304 }
1305
1306 static int chcr_aes_encrypt(struct ablkcipher_request *req)
1307 {
1308 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1309 struct chcr_dev *dev = c_ctx(tfm)->dev;
1310 struct sk_buff *skb = NULL;
1311 int err, isfull = 0;
1312 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1313
1314 err = chcr_inc_wrcount(dev);
1315 if (err)
1316 return -ENXIO;
1317 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1318 c_ctx(tfm)->tx_qidx))) {
1319 isfull = 1;
1320 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1321 err = -ENOSPC;
1322 goto error;
1323 }
1324 }
1325
1326 err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1327 &skb, CHCR_ENCRYPT_OP);
1328 if (err || !skb)
1329 return err;
1330 skb->dev = u_ctx->lldi.ports[0];
1331 set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1332 chcr_send_wr(skb);
1333 return isfull ? -EBUSY : -EINPROGRESS;
1334 error:
1335 chcr_dec_wrcount(dev);
1336 return err;
1337 }
1338
1339 static int chcr_aes_decrypt(struct ablkcipher_request *req)
1340 {
1341 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1342 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1343 struct chcr_dev *dev = c_ctx(tfm)->dev;
1344 struct sk_buff *skb = NULL;
1345 int err, isfull = 0;
1346
1347 err = chcr_inc_wrcount(dev);
1348 if (err)
1349 return -ENXIO;
1350
1351 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1352 c_ctx(tfm)->tx_qidx))) {
1353 isfull = 1;
1354 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1355 return -ENOSPC;
1356 }
1357
1358 err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1359 &skb, CHCR_DECRYPT_OP);
1360 if (err || !skb)
1361 return err;
1362 skb->dev = u_ctx->lldi.ports[0];
1363 set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1364 chcr_send_wr(skb);
1365 return isfull ? -EBUSY : -EINPROGRESS;
1366 }
1367
1368 static int chcr_device_init(struct chcr_context *ctx)
1369 {
1370 struct uld_ctx *u_ctx = NULL;
1371 unsigned int id;
1372 int txq_perchan, txq_idx, ntxq;
1373 int err = 0, rxq_perchan, rxq_idx;
1374
1375 id = smp_processor_id();
1376 if (!ctx->dev) {
1377 u_ctx = assign_chcr_device();
1378 if (!u_ctx) {
1379 err = -ENXIO;
1380 pr_err("chcr device assignment fails\n");
1381 goto out;
1382 }
1383 ctx->dev = &u_ctx->dev;
1384 ntxq = u_ctx->lldi.ntxq;
1385 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1386 txq_perchan = ntxq / u_ctx->lldi.nchan;
1387 spin_lock(&ctx->dev->lock_chcr_dev);
1388 ctx->tx_chan_id = ctx->dev->tx_channel_id;
1389 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1390 spin_unlock(&ctx->dev->lock_chcr_dev);
1391 rxq_idx = ctx->tx_chan_id * rxq_perchan;
1392 rxq_idx += id % rxq_perchan;
1393 txq_idx = ctx->tx_chan_id * txq_perchan;
1394 txq_idx += id % txq_perchan;
1395 ctx->rx_qidx = rxq_idx;
1396 ctx->tx_qidx = txq_idx;
1397 /* Channel ID used by the SGE to forward the packet to the host.
1398 * The same value should be used by the FW in the cpl_fw6_pld
1399 * RSS_CH field. The driver programs the PCI channel ID to be used
1400 * by the FW at queue-allocation time with the value "pi->tx_chan".
1401 */
1402 ctx->pci_chan_id = txq_idx / txq_perchan;
1403 }
1404 out:
1405 return err;
1406 }
1407
1408 static int chcr_cra_init(struct crypto_tfm *tfm)
1409 {
1410 struct crypto_alg *alg = tfm->__crt_alg;
1411 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1412 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1413
1414 ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
1415 CRYPTO_ALG_NEED_FALLBACK);
1416 if (IS_ERR(ablkctx->sw_cipher)) {
1417 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1418 return PTR_ERR(ablkctx->sw_cipher);
1419 }
1420
1421 if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1422 /* To update tweak*/
1423 ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1424 if (IS_ERR(ablkctx->aes_generic)) {
1425 pr_err("failed to allocate aes cipher for tweak\n");
1426 return PTR_ERR(ablkctx->aes_generic);
1427 }
1428 } else
1429 ablkctx->aes_generic = NULL;
1430
1431 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1432 return chcr_device_init(crypto_tfm_ctx(tfm));
1433 }
1434
1435 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1436 {
1437 struct crypto_alg *alg = tfm->__crt_alg;
1438 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1439 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1440
1441 /* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1442 * cannot be used as the fallback in chcr_handle_cipher_resp().
1443 */
1444 ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1445 CRYPTO_ALG_NEED_FALLBACK);
1446 if (IS_ERR(ablkctx->sw_cipher)) {
1447 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1448 return PTR_ERR(ablkctx->sw_cipher);
1449 }
1450 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1451 return chcr_device_init(crypto_tfm_ctx(tfm));
1452 }
1453
1454
1455 static void chcr_cra_exit(struct crypto_tfm *tfm)
1456 {
1457 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1458 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1459
1460 crypto_free_sync_skcipher(ablkctx->sw_cipher);
1461 if (ablkctx->aes_generic)
1462 crypto_free_cipher(ablkctx->aes_generic);
1463 }
1464
1465 static int get_alg_config(struct algo_param *params,
1466 unsigned int auth_size)
1467 {
1468 switch (auth_size) {
1469 case SHA1_DIGEST_SIZE:
1470 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1471 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1472 params->result_size = SHA1_DIGEST_SIZE;
1473 break;
1474 case SHA224_DIGEST_SIZE:
1475 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1476 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1477 params->result_size = SHA256_DIGEST_SIZE;
1478 break;
1479 case SHA256_DIGEST_SIZE:
1480 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1481 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1482 params->result_size = SHA256_DIGEST_SIZE;
1483 break;
1484 case SHA384_DIGEST_SIZE:
1485 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1486 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1487 params->result_size = SHA512_DIGEST_SIZE;
1488 break;
1489 case SHA512_DIGEST_SIZE:
1490 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1491 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1492 params->result_size = SHA512_DIGEST_SIZE;
1493 break;
1494 default:
1495 pr_err("chcr : ERROR, unsupported digest size\n");
1496 return -EINVAL;
1497 }
1498 return 0;
1499 }
1500
1501 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1502 {
1503 crypto_free_shash(base_hash);
1504 }
1505
1506 /**
1507 * create_hash_wr - Create hash work request
1508 * @req: hash request
1509 */
1510 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1511 struct hash_wr_param *param)
1512 {
1513 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1514 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1515 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1516 struct sk_buff *skb = NULL;
1517 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1518 struct chcr_wr *chcr_req;
1519 struct ulptx_sgl *ulptx;
1520 unsigned int nents = 0, transhdr_len;
1521 unsigned int temp = 0;
1522 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1523 GFP_ATOMIC;
1524 struct adapter *adap = padap(h_ctx(tfm)->dev);
1525 int error = 0;
1526
1527 transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1528 req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1529 param->sg_len) <= SGE_MAX_WR_LEN;
1530 nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1531 CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1532 nents += param->bfr_len ? 1 : 0;
1533 transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1534 param->sg_len, 16) : (sgl_len(nents) * 8);
1535 transhdr_len = roundup(transhdr_len, 16);
1536
1537 skb = alloc_skb(transhdr_len, flags);
1538 if (!skb)
1539 return ERR_PTR(-ENOMEM);
1540 chcr_req = __skb_put_zero(skb, transhdr_len);
1541
1542 chcr_req->sec_cpl.op_ivinsrtofst =
1543 FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
1544 chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1545
1546 chcr_req->sec_cpl.aadstart_cipherstop_hi =
1547 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1548 chcr_req->sec_cpl.cipherstop_lo_authinsert =
1549 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1550 chcr_req->sec_cpl.seqno_numivs =
1551 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1552 param->opad_needed, 0);
1553
1554 chcr_req->sec_cpl.ivgen_hdrlen =
1555 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1556
1557 memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1558 param->alg_prm.result_size);
1559
1560 if (param->opad_needed)
1561 memcpy(chcr_req->key_ctx.key +
1562 ((param->alg_prm.result_size <= 32) ? 32 :
1563 CHCR_HASH_MAX_DIGEST_SIZE),
1564 hmacctx->opad, param->alg_prm.result_size);
1565
1566 chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1567 param->alg_prm.mk_size, 0,
1568 param->opad_needed,
1569 ((param->kctx_len +
1570 sizeof(chcr_req->key_ctx)) >> 4));
1571 chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1572 ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1573 DUMMY_BYTES);
1574 if (param->bfr_len != 0) {
1575 req_ctx->hctx_wr.dma_addr =
1576 dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1577 param->bfr_len, DMA_TO_DEVICE);
1578 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1579 req_ctx->hctx_wr.dma_addr)) {
1580 error = -ENOMEM;
1581 goto err;
1582 }
1583 req_ctx->hctx_wr.dma_len = param->bfr_len;
1584 } else {
1585 req_ctx->hctx_wr.dma_addr = 0;
1586 }
1587 chcr_add_hash_src_ent(req, ulptx, param);
1588 /* Request up to the max WR size */
1589 temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1590 (param->sg_len + param->bfr_len) : 0);
1591 atomic_inc(&adap->chcr_stats.digest_rqst);
1592 create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1593 param->hash_size, transhdr_len,
1594 temp, 0);
1595 req_ctx->hctx_wr.skb = skb;
1596 return skb;
1597 err:
1598 kfree_skb(skb);
1599 return ERR_PTR(error);
1600 }
1601
1602 static int chcr_ahash_update(struct ahash_request *req)
1603 {
1604 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1605 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1606 struct uld_ctx *u_ctx = NULL;
1607 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1608 struct sk_buff *skb;
1609 u8 remainder = 0, bs;
1610 unsigned int nbytes = req->nbytes;
1611 struct hash_wr_param params;
1612 int error, isfull = 0;
1613
1614 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1615 u_ctx = ULD_CTX(h_ctx(rtfm));
1616
1617 if (nbytes + req_ctx->reqlen >= bs) {
1618 remainder = (nbytes + req_ctx->reqlen) % bs;
1619 nbytes = nbytes + req_ctx->reqlen - remainder;
1620 } else {
1621 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1622 + req_ctx->reqlen, nbytes, 0);
1623 req_ctx->reqlen += nbytes;
1624 return 0;
1625 }
1626 error = chcr_inc_wrcount(dev);
1627 if (error)
1628 return -ENXIO;
1629 /* Detach state for CHCR means lldi or padap is freed. Increasing the
1630 * inflight count for the dev guarantees that lldi and padap are valid.
1631 */
1632 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1633 h_ctx(rtfm)->tx_qidx))) {
1634 isfull = 1;
1635 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1636 error = -ENOSPC;
1637 goto err;
1638 }
1639 }
1640
1641 chcr_init_hctx_per_wr(req_ctx);
1642 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1643 if (error) {
1644 error = -ENOMEM;
1645 goto err;
1646 }
1647 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1648 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1649 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1650 HASH_SPACE_LEFT(params.kctx_len), 0);
1651 if (params.sg_len > req->nbytes)
1652 params.sg_len = req->nbytes;
1653 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1654 req_ctx->reqlen;
1655 params.opad_needed = 0;
1656 params.more = 1;
1657 params.last = 0;
1658 params.bfr_len = req_ctx->reqlen;
1659 params.scmd1 = 0;
1660 req_ctx->hctx_wr.srcsg = req->src;
1661
1662 params.hash_size = params.alg_prm.result_size;
1663 req_ctx->data_len += params.sg_len + params.bfr_len;
1664 skb = create_hash_wr(req, &params);
1665 if (IS_ERR(skb)) {
1666 error = PTR_ERR(skb);
1667 goto unmap;
1668 }
1669
1670 req_ctx->hctx_wr.processed += params.sg_len;
1671 if (remainder) {
1672 /* Swap buffers */
1673 swap(req_ctx->reqbfr, req_ctx->skbfr);
1674 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1675 req_ctx->reqbfr, remainder, req->nbytes -
1676 remainder);
1677 }
1678 req_ctx->reqlen = remainder;
1679 skb->dev = u_ctx->lldi.ports[0];
1680 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1681 chcr_send_wr(skb);
1682
1683 return isfull ? -EBUSY : -EINPROGRESS;
1684 unmap:
1685 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1686 err:
1687 chcr_dec_wrcount(dev);
1688 return error;
1689 }
1690
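/* Build a final MD-style padding block: 0x80, zero padding and the total
 * message length in bits in the last eight bytes.
 */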
1691 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1692 {
1693 memset(bfr_ptr, 0, bs);
1694 *bfr_ptr = 0x80;
1695 if (bs == 64)
1696 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1697 else
1698 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1699 }
1700
1701 static int chcr_ahash_final(struct ahash_request *req)
1702 {
1703 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1704 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1705 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1706 struct hash_wr_param params;
1707 struct sk_buff *skb;
1708 struct uld_ctx *u_ctx = NULL;
1709 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1710 int error = -EINVAL;
1711
1712 error = chcr_inc_wrcount(dev);
1713 if (error)
1714 return -ENXIO;
1715
1716 chcr_init_hctx_per_wr(req_ctx);
1717 u_ctx = ULD_CTX(h_ctx(rtfm));
1718 if (is_hmac(crypto_ahash_tfm(rtfm)))
1719 params.opad_needed = 1;
1720 else
1721 params.opad_needed = 0;
1722 params.sg_len = 0;
1723 req_ctx->hctx_wr.isfinal = 1;
1724 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1725 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1726 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1727 params.opad_needed = 1;
1728 params.kctx_len *= 2;
1729 } else {
1730 params.opad_needed = 0;
1731 }
1732
1733 req_ctx->hctx_wr.result = 1;
1734 params.bfr_len = req_ctx->reqlen;
1735 req_ctx->data_len += params.bfr_len + params.sg_len;
1736 req_ctx->hctx_wr.srcsg = req->src;
1737 if (req_ctx->reqlen == 0) {
1738 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1739 params.last = 0;
1740 params.more = 1;
1741 params.scmd1 = 0;
1742 params.bfr_len = bs;
1743
1744 } else {
1745 params.scmd1 = req_ctx->data_len;
1746 params.last = 1;
1747 params.more = 0;
1748 }
1749 params.hash_size = crypto_ahash_digestsize(rtfm);
1750 skb = create_hash_wr(req, &params);
1751 if (IS_ERR(skb)) {
1752 error = PTR_ERR(skb);
1753 goto err;
1754 }
1755 req_ctx->reqlen = 0;
1756 skb->dev = u_ctx->lldi.ports[0];
1757 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1758 chcr_send_wr(skb);
1759 return -EINPROGRESS;
1760 err:
1761 chcr_dec_wrcount(dev);
1762 return error;
1763 }
1764
1765 static int chcr_ahash_finup(struct ahash_request *req)
1766 {
1767 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1768 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1769 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1770 struct uld_ctx *u_ctx = NULL;
1771 struct sk_buff *skb;
1772 struct hash_wr_param params;
1773 u8 bs;
1774 int error, isfull = 0;
1775
1776 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1777 u_ctx = ULD_CTX(h_ctx(rtfm));
1778 error = chcr_inc_wrcount(dev);
1779 if (error)
1780 return -ENXIO;
1781
1782 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1783 h_ctx(rtfm)->tx_qidx))) {
1784 isfull = 1;
1785 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1786 error = -ENOSPC;
1787 goto err;
1788 }
1789 }
1790 chcr_init_hctx_per_wr(req_ctx);
1791 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1792 if (error) {
1793 error = -ENOMEM;
1794 goto err;
1795 }
1796
1797 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1798 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1799 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1800 params.kctx_len *= 2;
1801 params.opad_needed = 1;
1802 } else {
1803 params.opad_needed = 0;
1804 }
1805
1806 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1807 HASH_SPACE_LEFT(params.kctx_len), 0);
1808 if (params.sg_len < req->nbytes) {
1809 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1810 params.kctx_len /= 2;
1811 params.opad_needed = 0;
1812 }
1813 params.last = 0;
1814 params.more = 1;
1815 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1816 - req_ctx->reqlen;
1817 params.hash_size = params.alg_prm.result_size;
1818 params.scmd1 = 0;
1819 } else {
1820 params.last = 1;
1821 params.more = 0;
1822 params.sg_len = req->nbytes;
1823 params.hash_size = crypto_ahash_digestsize(rtfm);
1824 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1825 params.sg_len;
1826 }
1827 params.bfr_len = req_ctx->reqlen;
1828 req_ctx->data_len += params.bfr_len + params.sg_len;
1829 req_ctx->hctx_wr.result = 1;
1830 req_ctx->hctx_wr.srcsg = req->src;
1831 if ((req_ctx->reqlen + req->nbytes) == 0) {
1832 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1833 params.last = 0;
1834 params.more = 1;
1835 params.scmd1 = 0;
1836 params.bfr_len = bs;
1837 }
1838 skb = create_hash_wr(req, &params);
1839 if (IS_ERR(skb)) {
1840 error = PTR_ERR(skb);
1841 goto unmap;
1842 }
1843 req_ctx->reqlen = 0;
1844 req_ctx->hctx_wr.processed += params.sg_len;
1845 skb->dev = u_ctx->lldi.ports[0];
1846 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1847 chcr_send_wr(skb);
1848
1849 return isfull ? -EBUSY : -EINPROGRESS;
1850 unmap:
1851 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1852 err:
1853 chcr_dec_wrcount(dev);
1854 return error;
1855 }
1856
1857 static int chcr_ahash_digest(struct ahash_request *req)
1858 {
1859 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1860 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1861 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1862 struct uld_ctx *u_ctx = NULL;
1863 struct sk_buff *skb;
1864 struct hash_wr_param params;
1865 u8 bs;
1866 int error, isfull = 0;
1867
1868 rtfm->init(req);
1869 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1870 error = chcr_inc_wrcount(dev);
1871 if (error)
1872 return -ENXIO;
1873
1874 u_ctx = ULD_CTX(h_ctx(rtfm));
1875 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1876 h_ctx(rtfm)->tx_qidx))) {
1877 isfull = 1;
1878 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1879 error = -ENOSPC;
1880 goto err;
1881 }
1882 }
1883
1884 chcr_init_hctx_per_wr(req_ctx);
1885 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1886 if (error) {
1887 error = -ENOMEM;
1888 goto err;
1889 }
1890
1891 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1892 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1893 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1894 params.kctx_len *= 2;
1895 params.opad_needed = 1;
1896 } else {
1897 params.opad_needed = 0;
1898 }
1899 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1900 HASH_SPACE_LEFT(params.kctx_len), 0);
1901 if (params.sg_len < req->nbytes) {
1902 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1903 params.kctx_len /= 2;
1904 params.opad_needed = 0;
1905 }
1906 params.last = 0;
1907 params.more = 1;
1908 params.scmd1 = 0;
1909 params.sg_len = rounddown(params.sg_len, bs);
1910 params.hash_size = params.alg_prm.result_size;
1911 } else {
1912 params.sg_len = req->nbytes;
1913 params.hash_size = crypto_ahash_digestsize(rtfm);
1914 params.last = 1;
1915 params.more = 0;
1916 params.scmd1 = req->nbytes + req_ctx->data_len;
1917
1918 }
1919 params.bfr_len = 0;
1920 req_ctx->hctx_wr.result = 1;
1921 req_ctx->hctx_wr.srcsg = req->src;
1922 req_ctx->data_len += params.bfr_len + params.sg_len;
1923
1924 if (req->nbytes == 0) {
1925 create_last_hash_block(req_ctx->reqbfr, bs, 0);
1926 params.more = 1;
1927 params.bfr_len = bs;
1928 }
1929
1930 skb = create_hash_wr(req, &params);
1931 if (IS_ERR(skb)) {
1932 error = PTR_ERR(skb);
1933 goto unmap;
1934 }
1935 req_ctx->hctx_wr.processed += params.sg_len;
1936 skb->dev = u_ctx->lldi.ports[0];
1937 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1938 chcr_send_wr(skb);
1939 return isfull ? -EBUSY : -EINPROGRESS;
1940 unmap:
1941 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1942 err:
1943 chcr_dec_wrcount(dev);
1944 return error;
1945 }
1946
1947 static int chcr_ahash_continue(struct ahash_request *req)
1948 {
1949 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1950 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1951 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1952 struct uld_ctx *u_ctx = NULL;
1953 struct sk_buff *skb;
1954 struct hash_wr_param params;
1955 u8 bs;
1956 int error;
1957
1958 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1959 u_ctx = ULD_CTX(h_ctx(rtfm));
1960 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1961 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1962 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1963 params.kctx_len *= 2;
1964 params.opad_needed = 1;
1965 } else {
1966 params.opad_needed = 0;
1967 }
1968 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1969 HASH_SPACE_LEFT(params.kctx_len),
1970 hctx_wr->src_ofst);
1971 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1972 params.sg_len = req->nbytes - hctx_wr->processed;
1973 if (!hctx_wr->result ||
1974 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1975 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1976 params.kctx_len /= 2;
1977 params.opad_needed = 0;
1978 }
1979 params.last = 0;
1980 params.more = 1;
1981 params.sg_len = rounddown(params.sg_len, bs);
1982 params.hash_size = params.alg_prm.result_size;
1983 params.scmd1 = 0;
1984 } else {
1985 params.last = 1;
1986 params.more = 0;
1987 params.hash_size = crypto_ahash_digestsize(rtfm);
1988 params.scmd1 = reqctx->data_len + params.sg_len;
1989 }
1990 params.bfr_len = 0;
1991 reqctx->data_len += params.sg_len;
1992 skb = create_hash_wr(req, &params);
1993 if (IS_ERR(skb)) {
1994 error = PTR_ERR(skb);
1995 goto err;
1996 }
1997 hctx_wr->processed += params.sg_len;
1998 skb->dev = u_ctx->lldi.ports[0];
1999 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
2000 chcr_send_wr(skb);
2001 return 0;
2002 err:
2003 return error;
2004 }
2005
2006 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2007 unsigned char *input,
2008 int err)
2009 {
2010 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2011 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2012 int digestsize, updated_digestsize;
2013 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2014 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2015 struct chcr_dev *dev = h_ctx(tfm)->dev;
2016
2017 if (input == NULL)
2018 goto out;
2019 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2020 updated_digestsize = digestsize;
2021 if (digestsize == SHA224_DIGEST_SIZE)
2022 updated_digestsize = SHA256_DIGEST_SIZE;
2023 else if (digestsize == SHA384_DIGEST_SIZE)
2024 updated_digestsize = SHA512_DIGEST_SIZE;
2025
2026 if (hctx_wr->dma_addr) {
2027 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2028 hctx_wr->dma_len, DMA_TO_DEVICE);
2029 hctx_wr->dma_addr = 0;
2030 }
2031 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2032 req->nbytes)) {
2033 if (hctx_wr->result == 1) {
2034 hctx_wr->result = 0;
2035 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2036 digestsize);
2037 } else {
2038 memcpy(reqctx->partial_hash,
2039 input + sizeof(struct cpl_fw6_pld),
2040 updated_digestsize);
2041
2042 }
2043 goto unmap;
2044 }
2045 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2046 updated_digestsize);
2047
2048 err = chcr_ahash_continue(req);
2049 if (err)
2050 goto unmap;
2051 return;
2052 unmap:
2053 if (hctx_wr->is_sg_map)
2054 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2055
2056
2057 out:
2058 chcr_dec_wrcount(dev);
2059 req->base.complete(&req->base, err);
2060 }
2061
2062 /*
2063 * chcr_handle_resp - Unmap the DMA buffers associated with the request
2064 * @req: crypto request
2065 */
2066 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2067 int err)
2068 {
2069 struct crypto_tfm *tfm = req->tfm;
2070 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2071 struct adapter *adap = padap(ctx->dev);
2072
2073 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2074 case CRYPTO_ALG_TYPE_AEAD:
2075 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2076 break;
2077
2078 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2079 chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2080 input, err);
2081 break;
2082 case CRYPTO_ALG_TYPE_AHASH:
2083 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2084 }
2085 atomic_inc(&adap->chcr_stats.complete);
2086 return err;
2087 }
2088 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2089 {
2090 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2091 struct chcr_ahash_req_ctx *state = out;
2092
2093 state->reqlen = req_ctx->reqlen;
2094 state->data_len = req_ctx->data_len;
2095 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2096 memcpy(state->partial_hash, req_ctx->partial_hash,
2097 CHCR_HASH_MAX_DIGEST_SIZE);
2098 chcr_init_hctx_per_wr(state);
2099 return 0;
2100 }
2101
2102 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2103 {
2104 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2105 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2106
2107 req_ctx->reqlen = state->reqlen;
2108 req_ctx->data_len = state->data_len;
2109 req_ctx->reqbfr = req_ctx->bfr1;
2110 req_ctx->skbfr = req_ctx->bfr2;
2111 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2112 memcpy(req_ctx->partial_hash, state->partial_hash,
2113 CHCR_HASH_MAX_DIGEST_SIZE);
2114 chcr_init_hctx_per_wr(req_ctx);
2115 return 0;
2116 }
2117
2118 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2119 unsigned int keylen)
2120 {
2121 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2122 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2123 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2124 unsigned int i, err = 0, updated_digestsize;
2125
2126 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2127
2128 /* Use the key to calculate the ipad and opad. The ipad will be sent with
2129 * the first request's data and the opad with the final hash result.
2130 * They are stored in hmacctx->ipad and hmacctx->opad respectively.
2131 */
2132 shash->tfm = hmacctx->base_hash;
2133 if (keylen > bs) {
2134 err = crypto_shash_digest(shash, key, keylen,
2135 hmacctx->ipad);
2136 if (err)
2137 goto out;
2138 keylen = digestsize;
2139 } else {
2140 memcpy(hmacctx->ipad, key, keylen);
2141 }
2142 memset(hmacctx->ipad + keylen, 0, bs - keylen);
2143 memcpy(hmacctx->opad, hmacctx->ipad, bs);
2144
2145 for (i = 0; i < bs / sizeof(int); i++) {
2146 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2147 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2148 }
2149
2150 updated_digestsize = digestsize;
2151 if (digestsize == SHA224_DIGEST_SIZE)
2152 updated_digestsize = SHA256_DIGEST_SIZE;
2153 else if (digestsize == SHA384_DIGEST_SIZE)
2154 updated_digestsize = SHA512_DIGEST_SIZE;
2155 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2156 hmacctx->ipad, digestsize);
2157 if (err)
2158 goto out;
2159 chcr_change_order(hmacctx->ipad, updated_digestsize);
2160
2161 err = chcr_compute_partial_hash(shash, hmacctx->opad,
2162 hmacctx->opad, digestsize);
2163 if (err)
2164 goto out;
2165 chcr_change_order(hmacctx->opad, updated_digestsize);
2166 out:
2167 return err;
2168 }
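/*
 * Illustrative summary of the HMAC precomputation above (assuming
 * IPAD_DATA/OPAD_DATA expand to the usual RFC 2104 pad words,
 * 0x36363636 and 0x5c5c5c5c):
 *
 *	K'         = key, zero-padded to the block size bs
 *	ipad state = partial_hash(K' ^ 0x36 repeated over one block)
 *	opad state = partial_hash(K' ^ 0x5c repeated over one block)
 *
 * Only these two one-block partial-hash states are kept; the hardware
 * continues the inner hash from the ipad state over the message and the
 * outer hash from the opad state over the inner digest.
 */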
2169
2170 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2171 unsigned int key_len)
2172 {
2173 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2174 unsigned short context_size = 0;
2175 int err;
2176
2177 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2178 if (err)
2179 goto badkey_err;
2180
2181 memcpy(ablkctx->key, key, key_len);
2182 ablkctx->enckey_len = key_len;
2183 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2184 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2185 ablkctx->key_ctx_hdr =
2186 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2187 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2188 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2189 CHCR_KEYCTX_NO_KEY, 1,
2190 0, context_size);
2191 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2192 return 0;
2193 badkey_err:
2194 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2195 ablkctx->enckey_len = 0;
2196
2197 return err;
2198 }
2199
2200 static int chcr_sha_init(struct ahash_request *areq)
2201 {
2202 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2203 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2204 int digestsize = crypto_ahash_digestsize(tfm);
2205
2206 req_ctx->data_len = 0;
2207 req_ctx->reqlen = 0;
2208 req_ctx->reqbfr = req_ctx->bfr1;
2209 req_ctx->skbfr = req_ctx->bfr2;
2210 copy_hash_init_values(req_ctx->partial_hash, digestsize);
2211
2212 return 0;
2213 }
2214
2215 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2216 {
2217 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2218 sizeof(struct chcr_ahash_req_ctx));
2219 return chcr_device_init(crypto_tfm_ctx(tfm));
2220 }
2221
2222 static int chcr_hmac_init(struct ahash_request *areq)
2223 {
2224 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2225 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2226 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2227 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2228 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2229
2230 chcr_sha_init(areq);
2231 req_ctx->data_len = bs;
2232 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2233 if (digestsize == SHA224_DIGEST_SIZE)
2234 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2235 SHA256_DIGEST_SIZE);
2236 else if (digestsize == SHA384_DIGEST_SIZE)
2237 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2238 SHA512_DIGEST_SIZE);
2239 else
2240 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2241 digestsize);
2242 }
2243 return 0;
2244 }
2245
2246 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2247 {
2248 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2249 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2250 unsigned int digestsize =
2251 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2252
2253 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2254 sizeof(struct chcr_ahash_req_ctx));
2255 hmacctx->base_hash = chcr_alloc_shash(digestsize);
2256 if (IS_ERR(hmacctx->base_hash))
2257 return PTR_ERR(hmacctx->base_hash);
2258 return chcr_device_init(crypto_tfm_ctx(tfm));
2259 }
2260
2261 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2262 {
2263 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2264 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2265
2266 if (hmacctx->base_hash) {
2267 chcr_free_shash(hmacctx->base_hash);
2268 hmacctx->base_hash = NULL;
2269 }
2270 }
2271
2272 inline void chcr_aead_common_exit(struct aead_request *req)
2273 {
2274 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2275 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2276 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2277
2278 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2279 }
2280
2281 static int chcr_aead_common_init(struct aead_request *req)
2282 {
2283 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2284 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2285 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2286 unsigned int authsize = crypto_aead_authsize(tfm);
2287 int error = -EINVAL;
2288
2289 /* validate key size */
2290 if (aeadctx->enckey_len == 0)
2291 goto err;
2292 if (reqctx->op && req->cryptlen < authsize)
2293 goto err;
2294 if (reqctx->b0_len)
2295 reqctx->scratch_pad = reqctx->iv + IV;
2296 else
2297 reqctx->scratch_pad = NULL;
2298
2299 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2300 reqctx->op);
2301 if (error) {
2302 error = -ENOMEM;
2303 goto err;
2304 }
2305
2306 return 0;
2307 err:
2308 return error;
2309 }
2310
2311 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2312 int aadmax, int wrlen,
2313 unsigned short op_type)
2314 {
2315 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2316
2317 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2318 dst_nents > MAX_DSGL_ENT ||
2319 (req->assoclen > aadmax) ||
2320 (wrlen > SGE_MAX_WR_LEN))
2321 return 1;
2322 return 0;
2323 }
2324
2325 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2326 {
2327 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2328 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2329 struct aead_request *subreq = aead_request_ctx(req);
2330
2331 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2332 aead_request_set_callback(subreq, req->base.flags,
2333 req->base.complete, req->base.data);
2334 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2335 req->iv);
2336 aead_request_set_ad(subreq, req->assoclen);
2337 return op_type ? crypto_aead_decrypt(subreq) :
2338 crypto_aead_encrypt(subreq);
2339 }
2340
2341 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2342 unsigned short qid,
2343 int size)
2344 {
2345 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2346 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2347 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2348 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2349 struct sk_buff *skb = NULL;
2350 struct chcr_wr *chcr_req;
2351 struct cpl_rx_phys_dsgl *phys_cpl;
2352 struct ulptx_sgl *ulptx;
2353 unsigned int transhdr_len;
2354 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2355 unsigned int kctx_len = 0, dnents, snents;
2356 unsigned int authsize = crypto_aead_authsize(tfm);
2357 int error = -EINVAL;
2358 u8 *ivptr;
2359 int null = 0;
2360 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2361 GFP_ATOMIC;
2362 struct adapter *adap = padap(a_ctx(tfm)->dev);
2363
2364 if (req->cryptlen == 0)
2365 return NULL;
2366
2367 reqctx->b0_len = 0;
2368 error = chcr_aead_common_init(req);
2369 if (error)
2370 return ERR_PTR(error);
2371
2372 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2373 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2374 null = 1;
2375 }
2376 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2377 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2378 dnents += MIN_AUTH_SG; // For IV
2379 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2380 CHCR_SRC_SG_SIZE, 0);
2381 dst_size = get_space_for_phys_dsgl(dnents);
2382 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2383 - sizeof(chcr_req->key_ctx);
2384 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2385 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2386 SGE_MAX_WR_LEN;
2387 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2388 : (sgl_len(snents) * 8);
2389 transhdr_len += temp;
2390 transhdr_len = roundup(transhdr_len, 16);
2391
2392 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2393 transhdr_len, reqctx->op)) {
2394 atomic_inc(&adap->chcr_stats.fallback);
2395 chcr_aead_common_exit(req);
2396 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2397 }
2398 skb = alloc_skb(transhdr_len, flags);
2399 if (!skb) {
2400 error = -ENOMEM;
2401 goto err;
2402 }
2403
2404 chcr_req = __skb_put_zero(skb, transhdr_len);
2405
2406 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2407
2408 /*
2409 * Input order is AAD, IV and payload, where the IV is included as
2410 * part of the authdata. All other fields are filled according
2411 * to the hardware spec.
2412 */
2413 chcr_req->sec_cpl.op_ivinsrtofst =
2414 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
2415 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2416 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2417 null ? 0 : 1 + IV,
2418 null ? 0 : IV + req->assoclen,
2419 req->assoclen + IV + 1,
2420 (temp & 0x1F0) >> 4);
2421 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2422 temp & 0xF,
2423 null ? 0 : req->assoclen + IV + 1,
2424 temp, temp);
2425 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2426 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2427 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2428 else
2429 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2430 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2431 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2432 temp,
2433 actx->auth_mode, aeadctx->hmac_ctrl,
2434 IV >> 1);
2435 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2436 0, 0, dst_size);
2437
2438 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2439 if (reqctx->op == CHCR_ENCRYPT_OP ||
2440 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2441 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2442 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2443 aeadctx->enckey_len);
2444 else
2445 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2446 aeadctx->enckey_len);
2447
2448 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2449 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2450 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2451 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2452 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2453 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2454 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2455 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2456 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2457 CTR_RFC3686_IV_SIZE);
2458 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2459 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2460 } else {
2461 memcpy(ivptr, req->iv, IV);
2462 }
2463 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2464 chcr_add_aead_src_ent(req, ulptx);
2465 atomic_inc(&adap->chcr_stats.cipher_rqst);
2466 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2467 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2468 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2469 transhdr_len, temp, 0);
2470 reqctx->skb = skb;
2471
2472 return skb;
2473 err:
2474 chcr_aead_common_exit(req);
2475
2476 return ERR_PTR(error);
2477 }
2478
2479 int chcr_aead_dma_map(struct device *dev,
2480 struct aead_request *req,
2481 unsigned short op_type)
2482 {
2483 int error;
2484 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2485 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2486 unsigned int authsize = crypto_aead_authsize(tfm);
2487 int dst_size;
2488
2489 dst_size = req->assoclen + req->cryptlen + (op_type ?
2490 -authsize : authsize);
2491 if (!req->cryptlen || !dst_size)
2492 return 0;
2493 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2494 DMA_BIDIRECTIONAL);
2495 if (dma_mapping_error(dev, reqctx->iv_dma))
2496 return -ENOMEM;
2497 if (reqctx->b0_len)
2498 reqctx->b0_dma = reqctx->iv_dma + IV;
2499 else
2500 reqctx->b0_dma = 0;
2501 if (req->src == req->dst) {
2502 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2503 DMA_BIDIRECTIONAL);
2504 if (!error)
2505 goto err;
2506 } else {
2507 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2508 DMA_TO_DEVICE);
2509 if (!error)
2510 goto err;
2511 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2512 DMA_FROM_DEVICE);
2513 if (!error) {
2514 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2515 DMA_TO_DEVICE);
2516 goto err;
2517 }
2518 }
2519
2520 return 0;
2521 err:
2522 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2523 return -ENOMEM;
2524 }
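/*
 * Note on the error handling above: dma_map_sg() returns the number of
 * mapped entries and 0 on failure, so the "if (!error)" tests treat a
 * zero return as the failure case even though the variable is named
 * "error".
 */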
2525
2526 void chcr_aead_dma_unmap(struct device *dev,
2527 struct aead_request *req,
2528 unsigned short op_type)
2529 {
2530 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2531 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2532 unsigned int authsize = crypto_aead_authsize(tfm);
2533 int dst_size;
2534
2535 dst_size = req->assoclen + req->cryptlen + (op_type ?
2536 -authsize : authsize);
2537 if (!req->cryptlen || !dst_size)
2538 return;
2539
2540 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2541 DMA_BIDIRECTIONAL);
2542 if (req->src == req->dst) {
2543 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2544 DMA_BIDIRECTIONAL);
2545 } else {
2546 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2547 DMA_TO_DEVICE);
2548 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2549 DMA_FROM_DEVICE);
2550 }
2551 }
2552
2553 void chcr_add_aead_src_ent(struct aead_request *req,
2554 struct ulptx_sgl *ulptx)
2555 {
2556 struct ulptx_walk ulp_walk;
2557 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2558
2559 if (reqctx->imm) {
2560 u8 *buf = (u8 *)ulptx;
2561
2562 if (reqctx->b0_len) {
2563 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2564 buf += reqctx->b0_len;
2565 }
2566 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2567 buf, req->cryptlen + req->assoclen, 0);
2568 } else {
2569 ulptx_walk_init(&ulp_walk, ulptx);
2570 if (reqctx->b0_len)
2571 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2572 reqctx->b0_dma);
2573 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2574 req->assoclen, 0);
2575 ulptx_walk_end(&ulp_walk);
2576 }
2577 }
2578
2579 void chcr_add_aead_dst_ent(struct aead_request *req,
2580 struct cpl_rx_phys_dsgl *phys_cpl,
2581 unsigned short qid)
2582 {
2583 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2584 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2585 struct dsgl_walk dsgl_walk;
2586 unsigned int authsize = crypto_aead_authsize(tfm);
2587 struct chcr_context *ctx = a_ctx(tfm);
2588 u32 temp;
2589
2590 dsgl_walk_init(&dsgl_walk, phys_cpl);
2591 dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2592 temp = req->assoclen + req->cryptlen +
2593 (reqctx->op ? -authsize : authsize);
2594 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2595 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2596 }
2597
2598 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2599 void *ulptx,
2600 struct cipher_wr_param *wrparam)
2601 {
2602 struct ulptx_walk ulp_walk;
2603 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2604 u8 *buf = ulptx;
2605
2606 memcpy(buf, reqctx->iv, IV);
2607 buf += IV;
2608 if (reqctx->imm) {
2609 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2610 buf, wrparam->bytes, reqctx->processed);
2611 } else {
2612 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2613 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2614 reqctx->src_ofst);
2615 reqctx->srcsg = ulp_walk.last_sg;
2616 reqctx->src_ofst = ulp_walk.last_sg_len;
2617 ulptx_walk_end(&ulp_walk);
2618 }
2619 }
2620
2621 void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2622 struct cpl_rx_phys_dsgl *phys_cpl,
2623 struct cipher_wr_param *wrparam,
2624 unsigned short qid)
2625 {
2626 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2627 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2628 struct chcr_context *ctx = c_ctx(tfm);
2629 struct dsgl_walk dsgl_walk;
2630
2631 dsgl_walk_init(&dsgl_walk, phys_cpl);
2632 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2633 reqctx->dst_ofst);
2634 reqctx->dstsg = dsgl_walk.last_sg;
2635 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2636
2637 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2638 }
2639
2640 void chcr_add_hash_src_ent(struct ahash_request *req,
2641 struct ulptx_sgl *ulptx,
2642 struct hash_wr_param *param)
2643 {
2644 struct ulptx_walk ulp_walk;
2645 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2646
2647 if (reqctx->hctx_wr.imm) {
2648 u8 *buf = (u8 *)ulptx;
2649
2650 if (param->bfr_len) {
2651 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2652 buf += param->bfr_len;
2653 }
2654
2655 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2656 sg_nents(reqctx->hctx_wr.srcsg), buf,
2657 param->sg_len, 0);
2658 } else {
2659 ulptx_walk_init(&ulp_walk, ulptx);
2660 if (param->bfr_len)
2661 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2662 reqctx->hctx_wr.dma_addr);
2663 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2664 param->sg_len, reqctx->hctx_wr.src_ofst);
2665 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2666 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2667 ulptx_walk_end(&ulp_walk);
2668 }
2669 }
2670
2671 int chcr_hash_dma_map(struct device *dev,
2672 struct ahash_request *req)
2673 {
2674 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2675 int error = 0;
2676
2677 if (!req->nbytes)
2678 return 0;
2679 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2680 DMA_TO_DEVICE);
2681 if (!error)
2682 return -ENOMEM;
2683 req_ctx->hctx_wr.is_sg_map = 1;
2684 return 0;
2685 }
2686
2687 void chcr_hash_dma_unmap(struct device *dev,
2688 struct ahash_request *req)
2689 {
2690 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2691
2692 if (!req->nbytes)
2693 return;
2694
2695 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2696 DMA_TO_DEVICE);
2697 req_ctx->hctx_wr.is_sg_map = 0;
2698
2699 }
2700
2701 int chcr_cipher_dma_map(struct device *dev,
2702 struct ablkcipher_request *req)
2703 {
2704 int error;
2705
2706 if (req->src == req->dst) {
2707 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2708 DMA_BIDIRECTIONAL);
2709 if (!error)
2710 goto err;
2711 } else {
2712 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2713 DMA_TO_DEVICE);
2714 if (!error)
2715 goto err;
2716 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2717 DMA_FROM_DEVICE);
2718 if (!error) {
2719 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2720 DMA_TO_DEVICE);
2721 goto err;
2722 }
2723 }
2724
2725 return 0;
2726 err:
2727 return -ENOMEM;
2728 }
2729
2730 void chcr_cipher_dma_unmap(struct device *dev,
2731 struct ablkcipher_request *req)
2732 {
2733 if (req->src == req->dst) {
2734 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2735 DMA_BIDIRECTIONAL);
2736 } else {
2737 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2738 DMA_TO_DEVICE);
2739 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2740 DMA_FROM_DEVICE);
2741 }
2742 }
2743
2744 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2745 {
2746 __be32 data;
2747
2748 memset(block, 0, csize);
2749 block += csize;
2750
2751 if (csize >= 4)
2752 csize = 4;
2753 else if (msglen > (unsigned int)(1 << (8 * csize)))
2754 return -EOVERFLOW;
2755
2756 data = cpu_to_be32(msglen);
2757 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2758
2759 return 0;
2760 }
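/*
 * Worked example for set_msg_len() (illustrative values): with csize = 3
 * and msglen = 0x0102AB, the big-endian length is written into the last
 * three bytes of the field:
 *
 *	block[0..2] = 01 02 ab
 *
 * For csize >= 4 only the low four bytes of the 32-bit length are copied,
 * so message lengths are limited to what fits in an unsigned int here.
 */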
2761
2762 static int generate_b0(struct aead_request *req, u8 *ivptr,
2763 unsigned short op_type)
2764 {
2765 unsigned int l, lp, m;
2766 int rc;
2767 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2768 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2769 u8 *b0 = reqctx->scratch_pad;
2770
2771 m = crypto_aead_authsize(aead);
2772
2773 memcpy(b0, ivptr, 16);
2774
2775 lp = b0[0];
2776 l = lp + 1;
2777
2778 /* set m, bits 3-5 */
2779 *b0 |= (8 * ((m - 2) / 2));
2780
2781 /* set adata, bit 6, if associated data is used */
2782 if (req->assoclen)
2783 *b0 |= 64;
2784 rc = set_msg_len(b0 + 16 - l,
2785 (op_type == CHCR_DECRYPT_OP) ?
2786 req->cryptlen - m : req->cryptlen, l);
2787
2788 return rc;
2789 }
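/*
 * Illustrative B0 layout (RFC 3610) as built above:
 *
 *	byte 0          : flags = 64*Adata | 8*((M - 2) / 2) | (L - 1)
 *	bytes 1..15-L   : nonce N (copied from the IV)
 *	bytes 16-L..15  : l(m), the message length, big endian
 *
 * e.g. with a 16-byte tag (M = 16), associated data present and L = 4,
 * flags = 0x40 | 0x38 | 0x03 = 0x7b.
 */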
2790
2791 static inline int crypto_ccm_check_iv(const u8 *iv)
2792 {
2793 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2794 if (iv[0] < 1 || iv[0] > 7)
2795 return -EINVAL;
2796
2797 return 0;
2798 }
2799
2800 static int ccm_format_packet(struct aead_request *req,
2801 u8 *ivptr,
2802 unsigned int sub_type,
2803 unsigned short op_type,
2804 unsigned int assoclen)
2805 {
2806 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2807 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2808 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2809 int rc = 0;
2810
2811 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2812 ivptr[0] = 3;
2813 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2814 memcpy(ivptr + 4, req->iv, 8);
2815 memset(ivptr + 12, 0, 4);
2816 } else {
2817 memcpy(ivptr, req->iv, 16);
2818 }
2819 if (assoclen)
2820 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2821 htons(assoclen);
2822
2823 rc = generate_b0(req, ivptr, op_type);
2824 /* zero the ctr value */
2825 memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2826 return rc;
2827 }
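/*
 * Illustrative nonce layout for the RFC 4309 branch above (L' = 3, i.e.
 * a 4-byte counter):
 *
 *	ivptr[0]      = 0x03		(flags, carrying L - 1)
 *	ivptr[1..3]   = salt from setkey
 *	ivptr[4..11]  = 8-byte IV from the request
 *	ivptr[12..15] = 0		(counter, zeroed)
 *
 * When AAD is present its length is also written as a 16-bit big-endian
 * value right after B0 in the scratch pad.
 */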
2828
2829 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2830 unsigned int dst_size,
2831 struct aead_request *req,
2832 unsigned short op_type)
2833 {
2834 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2835 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2836 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2837 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2838 unsigned int c_id = a_ctx(tfm)->tx_chan_id;
2839 unsigned int ccm_xtra;
2840 unsigned char tag_offset = 0, auth_offset = 0;
2841 unsigned int assoclen;
2842
2843 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2844 assoclen = req->assoclen - 8;
2845 else
2846 assoclen = req->assoclen;
2847 ccm_xtra = CCM_B0_SIZE +
2848 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2849
2850 auth_offset = req->cryptlen ?
2851 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2852 if (op_type == CHCR_DECRYPT_OP) {
2853 if (crypto_aead_authsize(tfm) != req->cryptlen)
2854 tag_offset = crypto_aead_authsize(tfm);
2855 else
2856 auth_offset = 0;
2857 }
2858
2859
2860 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2861 2, 1);
2862 sec_cpl->pldlen =
2863 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2864 /* For CCM there will always be a B0 block, so AAD start is always 1 */
2865 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2866 1 + IV, IV + assoclen + ccm_xtra,
2867 req->assoclen + IV + 1 + ccm_xtra, 0);
2868
2869 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2870 auth_offset, tag_offset,
2871 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2872 crypto_aead_authsize(tfm));
2873 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2874 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2875 cipher_mode, mac_mode,
2876 aeadctx->hmac_ctrl, IV >> 1);
2877
2878 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2879 0, dst_size);
2880 }
2881
2882 static int aead_ccm_validate_input(unsigned short op_type,
2883 struct aead_request *req,
2884 struct chcr_aead_ctx *aeadctx,
2885 unsigned int sub_type)
2886 {
2887 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2888 if (crypto_ccm_check_iv(req->iv)) {
2889 pr_err("CCM: IV check fails\n");
2890 return -EINVAL;
2891 }
2892 } else {
2893 if (req->assoclen != 16 && req->assoclen != 20) {
2894 pr_err("RFC4309: Invalid AAD length %d\n",
2895 req->assoclen);
2896 return -EINVAL;
2897 }
2898 }
2899 return 0;
2900 }
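/*
 * Note on the rfc4309 assoclen check above: as with the generic rfc4309
 * template, the 8-byte IV is counted as part of the associated data, so
 * the only valid lengths are 16 (8-byte ESP header plus IV) and 20 (with
 * extended sequence numbers).
 */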
2901
2902 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2903 unsigned short qid,
2904 int size)
2905 {
2906 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2907 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2908 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2909 struct sk_buff *skb = NULL;
2910 struct chcr_wr *chcr_req;
2911 struct cpl_rx_phys_dsgl *phys_cpl;
2912 struct ulptx_sgl *ulptx;
2913 unsigned int transhdr_len;
2914 unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2915 unsigned int sub_type, assoclen = req->assoclen;
2916 unsigned int authsize = crypto_aead_authsize(tfm);
2917 int error = -EINVAL;
2918 u8 *ivptr;
2919 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2920 GFP_ATOMIC;
2921 struct adapter *adap = padap(a_ctx(tfm)->dev);
2922
2923 sub_type = get_aead_subtype(tfm);
2924 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2925 assoclen -= 8;
2926 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2927 error = chcr_aead_common_init(req);
2928 if (error)
2929 return ERR_PTR(error);
2930
2931 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2932 if (error)
2933 goto err;
2934 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
2935 + (reqctx->op ? -authsize : authsize),
2936 CHCR_DST_SG_SIZE, 0);
2937 dnents += MIN_CCM_SG; // For IV and B0
2938 dst_size = get_space_for_phys_dsgl(dnents);
2939 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2940 CHCR_SRC_SG_SIZE, 0);
2941 snents += MIN_CCM_SG; // For B0
2942 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2943 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2944 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2945 reqctx->b0_len) <= SGE_MAX_WR_LEN;
2946 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
2947 reqctx->b0_len, 16) :
2948 (sgl_len(snents) * 8);
2949 transhdr_len += temp;
2950 transhdr_len = roundup(transhdr_len, 16);
2951
2952 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2953 reqctx->b0_len, transhdr_len, reqctx->op)) {
2954 atomic_inc(&adap->chcr_stats.fallback);
2955 chcr_aead_common_exit(req);
2956 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2957 }
2958 skb = alloc_skb(transhdr_len, flags);
2959
2960 if (!skb) {
2961 error = -ENOMEM;
2962 goto err;
2963 }
2964
2965 chcr_req = __skb_put_zero(skb, transhdr_len);
2966
2967 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2968
2969 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2970 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2971 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2972 aeadctx->key, aeadctx->enckey_len);
2973
2974 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2975 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2976 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2977 error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
2978 if (error)
2979 goto dstmap_fail;
2980 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2981 chcr_add_aead_src_ent(req, ulptx);
2982
2983 atomic_inc(&adap->chcr_stats.aead_rqst);
2984 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2985 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
2986 reqctx->b0_len) : 0);
2987 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2988 transhdr_len, temp, 0);
2989 reqctx->skb = skb;
2990
2991 return skb;
2992 dstmap_fail:
2993 kfree_skb(skb);
2994 err:
2995 chcr_aead_common_exit(req);
2996 return ERR_PTR(error);
2997 }
2998
2999 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3000 unsigned short qid,
3001 int size)
3002 {
3003 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3004 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3005 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3006 struct sk_buff *skb = NULL;
3007 struct chcr_wr *chcr_req;
3008 struct cpl_rx_phys_dsgl *phys_cpl;
3009 struct ulptx_sgl *ulptx;
3010 unsigned int transhdr_len, dnents = 0, snents;
3011 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3012 unsigned int authsize = crypto_aead_authsize(tfm);
3013 int error = -EINVAL;
3014 u8 *ivptr;
3015 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3016 GFP_ATOMIC;
3017 struct adapter *adap = padap(a_ctx(tfm)->dev);
3018
3019 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3020 assoclen = req->assoclen - 8;
3021
3022 reqctx->b0_len = 0;
3023 error = chcr_aead_common_init(req);
3024 if (error)
3025 return ERR_PTR(error);
3026 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3027 (reqctx->op ? -authsize : authsize),
3028 CHCR_DST_SG_SIZE, 0);
3029 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3030 CHCR_SRC_SG_SIZE, 0);
3031 dnents += MIN_GCM_SG; // For IV
3032 dst_size = get_space_for_phys_dsgl(dnents);
3033 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3034 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3035 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3036 SGE_MAX_WR_LEN;
3037 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3038 (sgl_len(snents) * 8);
3039 transhdr_len += temp;
3040 transhdr_len = roundup(transhdr_len, 16);
3041 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3042 transhdr_len, reqctx->op)) {
3043
3044 atomic_inc(&adap->chcr_stats.fallback);
3045 chcr_aead_common_exit(req);
3046 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3047 }
3048 skb = alloc_skb(transhdr_len, flags);
3049 if (!skb) {
3050 error = -ENOMEM;
3051 goto err;
3052 }
3053
3054 chcr_req = __skb_put_zero(skb, transhdr_len);
3055
3056 // Offset of the tag from the end
3057 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3058 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3059 a_ctx(tfm)->tx_chan_id, 2, 1);
3060 chcr_req->sec_cpl.pldlen =
3061 htonl(req->assoclen + IV + req->cryptlen);
3062 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3063 assoclen ? 1 + IV : 0,
3064 assoclen ? IV + assoclen : 0,
3065 req->assoclen + IV + 1, 0);
3066 chcr_req->sec_cpl.cipherstop_lo_authinsert =
3067 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3068 temp, temp);
3069 chcr_req->sec_cpl.seqno_numivs =
3070 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3071 CHCR_ENCRYPT_OP) ? 1 : 0,
3072 CHCR_SCMD_CIPHER_MODE_AES_GCM,
3073 CHCR_SCMD_AUTH_MODE_GHASH,
3074 aeadctx->hmac_ctrl, IV >> 1);
3075 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3076 0, 0, dst_size);
3077 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3078 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3079 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3080 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3081
3082 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3083 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3084 /* prepare a 16 byte iv */
3085 /* S A L T | IV | 0x00000001 */
3086 if (get_aead_subtype(tfm) ==
3087 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3088 memcpy(ivptr, aeadctx->salt, 4);
3089 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3090 } else {
3091 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3092 }
3093 *((unsigned int *)(ivptr + 12)) = htonl(0x01);
3094
3095 ulptx = (struct ulptx_sgl *)(ivptr + 16);
3096
3097 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3098 chcr_add_aead_src_ent(req, ulptx);
3099 atomic_inc(&adap->chcr_stats.aead_rqst);
3100 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3101 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3102 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3103 transhdr_len, temp, reqctx->verify);
3104 reqctx->skb = skb;
3105 return skb;
3106
3107 err:
3108 chcr_aead_common_exit(req);
3109 return ERR_PTR(error);
3110 }
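/*
 * Illustrative layout of the 16-byte IV (the initial counter block)
 * prepared in create_gcm_wr() above:
 *
 *	rfc4106: salt(4, from setkey) | explicit IV(8, from req->iv) | 0x00000001
 *	gcm    : IV(12, from req->iv)                                | 0x00000001
 */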
3111
3112
3113
3114 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3115 {
3116 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3117 struct aead_alg *alg = crypto_aead_alg(tfm);
3118
3119 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3120 CRYPTO_ALG_NEED_FALLBACK |
3121 CRYPTO_ALG_ASYNC);
3122 if (IS_ERR(aeadctx->sw_cipher))
3123 return PTR_ERR(aeadctx->sw_cipher);
3124 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3125 sizeof(struct aead_request) +
3126 crypto_aead_reqsize(aeadctx->sw_cipher)));
3127 return chcr_device_init(a_ctx(tfm));
3128 }
3129
3130 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3131 {
3132 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3133
3134 crypto_free_aead(aeadctx->sw_cipher);
3135 }
3136
3137 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3138 unsigned int authsize)
3139 {
3140 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3141
3142 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3143 aeadctx->mayverify = VERIFY_HW;
3144 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3145 }
3146 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3147 unsigned int authsize)
3148 {
3149 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3150 u32 maxauth = crypto_aead_maxauthsize(tfm);
3151
3152 /* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3153 * does not hold for SHA1, so the authsize == 12 check must come before
3154 * the authsize == (maxauth >> 1) check.
3155 */
3156 if (authsize == ICV_4) {
3157 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3158 aeadctx->mayverify = VERIFY_HW;
3159 } else if (authsize == ICV_6) {
3160 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3161 aeadctx->mayverify = VERIFY_HW;
3162 } else if (authsize == ICV_10) {
3163 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3164 aeadctx->mayverify = VERIFY_HW;
3165 } else if (authsize == ICV_12) {
3166 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3167 aeadctx->mayverify = VERIFY_HW;
3168 } else if (authsize == ICV_14) {
3169 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3170 aeadctx->mayverify = VERIFY_HW;
3171 } else if (authsize == (maxauth >> 1)) {
3172 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3173 aeadctx->mayverify = VERIFY_HW;
3174 } else if (authsize == maxauth) {
3175 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3176 aeadctx->mayverify = VERIFY_HW;
3177 } else {
3178 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3179 aeadctx->mayverify = VERIFY_SW;
3180 }
3181 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3182 }
3183
3184
3185 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3186 {
3187 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3188
3189 switch (authsize) {
3190 case ICV_4:
3191 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3192 aeadctx->mayverify = VERIFY_HW;
3193 break;
3194 case ICV_8:
3195 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3196 aeadctx->mayverify = VERIFY_HW;
3197 break;
3198 case ICV_12:
3199 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3200 aeadctx->mayverify = VERIFY_HW;
3201 break;
3202 case ICV_14:
3203 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3204 aeadctx->mayverify = VERIFY_HW;
3205 break;
3206 case ICV_16:
3207 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3208 aeadctx->mayverify = VERIFY_HW;
3209 break;
3210 case ICV_13:
3211 case ICV_15:
3212 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3213 aeadctx->mayverify = VERIFY_SW;
3214 break;
3215 default:
3216
3217 crypto_tfm_set_flags((struct crypto_tfm *) tfm,
3218 CRYPTO_TFM_RES_BAD_KEY_LEN);
3219 return -EINVAL;
3220 }
3221 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3222 }
3223
3224 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3225 unsigned int authsize)
3226 {
3227 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3228
3229 switch (authsize) {
3230 case ICV_8:
3231 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3232 aeadctx->mayverify = VERIFY_HW;
3233 break;
3234 case ICV_12:
3235 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3236 aeadctx->mayverify = VERIFY_HW;
3237 break;
3238 case ICV_16:
3239 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3240 aeadctx->mayverify = VERIFY_HW;
3241 break;
3242 default:
3243 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3244 CRYPTO_TFM_RES_BAD_KEY_LEN);
3245 return -EINVAL;
3246 }
3247 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3248 }
3249
3250 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3251 unsigned int authsize)
3252 {
3253 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3254
3255 switch (authsize) {
3256 case ICV_4:
3257 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3258 aeadctx->mayverify = VERIFY_HW;
3259 break;
3260 case ICV_6:
3261 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3262 aeadctx->mayverify = VERIFY_HW;
3263 break;
3264 case ICV_8:
3265 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3266 aeadctx->mayverify = VERIFY_HW;
3267 break;
3268 case ICV_10:
3269 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3270 aeadctx->mayverify = VERIFY_HW;
3271 break;
3272 case ICV_12:
3273 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3274 aeadctx->mayverify = VERIFY_HW;
3275 break;
3276 case ICV_14:
3277 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3278 aeadctx->mayverify = VERIFY_HW;
3279 break;
3280 case ICV_16:
3281 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3282 aeadctx->mayverify = VERIFY_HW;
3283 break;
3284 default:
3285 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3286 CRYPTO_TFM_RES_BAD_KEY_LEN);
3287 return -EINVAL;
3288 }
3289 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3290 }
3291
3292 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3293 const u8 *key,
3294 unsigned int keylen)
3295 {
3296 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3297 unsigned char ck_size, mk_size;
3298 int key_ctx_size = 0;
3299
3300 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3301 if (keylen == AES_KEYSIZE_128) {
3302 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3303 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3304 } else if (keylen == AES_KEYSIZE_192) {
3305 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3306 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3307 } else if (keylen == AES_KEYSIZE_256) {
3308 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3309 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3310 } else {
3311 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3312 CRYPTO_TFM_RES_BAD_KEY_LEN);
3313 aeadctx->enckey_len = 0;
3314 return -EINVAL;
3315 }
3316 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3317 key_ctx_size >> 4);
3318 memcpy(aeadctx->key, key, keylen);
3319 aeadctx->enckey_len = keylen;
3320
3321 return 0;
3322 }
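/*
 * Note on the sizing above: the CCM key context carries two copies of the
 * AES key (create_aead_ccm_wr() copies aeadctx->key twice, for the cipher
 * and for the CBC-MAC), hence roundup(keylen, 16) * 2 on top of the
 * header; the header itself stores the size in 16-byte units
 * (key_ctx_size >> 4).
 */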
3323
3324 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3325 const u8 *key,
3326 unsigned int keylen)
3327 {
3328 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3329 int error;
3330
3331 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3332 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3333 CRYPTO_TFM_REQ_MASK);
3334 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3335 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3336 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3337 CRYPTO_TFM_RES_MASK);
3338 if (error)
3339 return error;
3340 return chcr_ccm_common_setkey(aead, key, keylen);
3341 }
3342
3343 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3344 unsigned int keylen)
3345 {
3346 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3347 int error;
3348
3349 if (keylen < 3) {
3350 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3351 CRYPTO_TFM_RES_BAD_KEY_LEN);
3352 aeadctx->enckey_len = 0;
3353 return -EINVAL;
3354 }
3355 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3356 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3357 CRYPTO_TFM_REQ_MASK);
3358 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3359 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3360 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3361 CRYPTO_TFM_RES_MASK);
3362 if (error)
3363 return error;
3364 keylen -= 3;
3365 memcpy(aeadctx->salt, key + keylen, 3);
3366 return chcr_ccm_common_setkey(aead, key, keylen);
3367 }
3368
3369 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3370 unsigned int keylen)
3371 {
3372 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3373 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3374 struct crypto_cipher *cipher;
3375 unsigned int ck_size;
3376 int ret = 0, key_ctx_size = 0;
3377
3378 aeadctx->enckey_len = 0;
3379 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3380 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3381 & CRYPTO_TFM_REQ_MASK);
3382 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3383 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3384 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3385 CRYPTO_TFM_RES_MASK);
3386 if (ret)
3387 goto out;
3388
3389 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3390 keylen > 3) {
3391 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3392 memcpy(aeadctx->salt, key + keylen, 4);
3393 }
3394 if (keylen == AES_KEYSIZE_128) {
3395 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3396 } else if (keylen == AES_KEYSIZE_192) {
3397 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3398 } else if (keylen == AES_KEYSIZE_256) {
3399 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3400 } else {
3401 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3402 CRYPTO_TFM_RES_BAD_KEY_LEN);
3403 pr_err("GCM: Invalid key length %d\n", keylen);
3404 ret = -EINVAL;
3405 goto out;
3406 }
3407
3408 memcpy(aeadctx->key, key, keylen);
3409 aeadctx->enckey_len = keylen;
3410 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3411 AEAD_H_SIZE;
3412 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3413 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3414 0, 0,
3415 key_ctx_size >> 4);
3416 /* Calculate H = CIPH(K, 0 repeated 16 times); it goes into
3417 * the key context.
3418 */
3419 cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3420 if (IS_ERR(cipher)) {
3421 aeadctx->enckey_len = 0;
3422 ret = -ENOMEM;
3423 goto out;
3424 }
3425
3426 ret = crypto_cipher_setkey(cipher, key, keylen);
3427 if (ret) {
3428 aeadctx->enckey_len = 0;
3429 goto out1;
3430 }
3431 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3432 crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3433
3434 out1:
3435 crypto_free_cipher(cipher);
3436 out:
3437 return ret;
3438 }
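/*
 * Illustrative key-context layout produced above for GCM:
 *
 *	[key_ctx_hdr][AES key, padded to a 16-byte multiple][H, 16 bytes]
 *
 * where H = AES_K(0^128) is the GHASH hash subkey computed with the
 * software cipher so it can be handed to the hardware in the key context.
 */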
3439
3440 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3441 unsigned int keylen)
3442 {
3443 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3444 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3445 /* The key blob contains both the auth key and the cipher key */
3446 struct crypto_authenc_keys keys;
3447 unsigned int bs, subtype;
3448 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3449 int err = 0, i, key_ctx_len = 0;
3450 unsigned char ck_size = 0;
3451 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3452 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3453 struct algo_param param;
3454 int align;
3455 u8 *o_ptr = NULL;
3456
3457 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3458 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3459 & CRYPTO_TFM_REQ_MASK);
3460 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3461 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3462 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3463 & CRYPTO_TFM_RES_MASK);
3464 if (err)
3465 goto out;
3466
3467 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3468 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3469 goto out;
3470 }
3471
3472 if (get_alg_config(&param, max_authsize)) {
3473 pr_err("chcr : Unsupported digest size\n");
3474 goto out;
3475 }
3476 subtype = get_aead_subtype(authenc);
3477 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3478 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3479 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3480 goto out;
3481 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3482 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3483 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3484 }
3485 if (keys.enckeylen == AES_KEYSIZE_128) {
3486 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3487 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3488 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3489 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3490 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3491 } else {
3492 pr_err("chcr : Unsupported cipher key\n");
3493 goto out;
3494 }
3495
3496 /* Copy only the encryption key. The authkey is used to generate h(ipad)
3497 * and h(opad), so it is not needed again. authkeylen equals the
3498 * hash digest size.
3499 */
3500 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3501 aeadctx->enckey_len = keys.enckeylen;
3502 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3503 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3504
3505 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3506 aeadctx->enckey_len << 3);
3507 }
3508 base_hash = chcr_alloc_shash(max_authsize);
3509 if (IS_ERR(base_hash)) {
3510 pr_err("chcr : Base driver cannot be loaded\n");
3511 aeadctx->enckey_len = 0;
3512 memzero_explicit(&keys, sizeof(keys));
3513 return -EINVAL;
3514 }
3515 {
3516 SHASH_DESC_ON_STACK(shash, base_hash);
3517
3518 shash->tfm = base_hash;
3519 bs = crypto_shash_blocksize(base_hash);
3520 align = KEYCTX_ALIGN_PAD(max_authsize);
3521 o_ptr = actx->h_iopad + param.result_size + align;
3522
3523 if (keys.authkeylen > bs) {
3524 err = crypto_shash_digest(shash, keys.authkey,
3525 keys.authkeylen,
3526 o_ptr);
3527 if (err) {
3528 pr_err("chcr : Base driver cannot be loaded\n");
3529 goto out;
3530 }
3531 keys.authkeylen = max_authsize;
3532 } else
3533 memcpy(o_ptr, keys.authkey, keys.authkeylen);
3534
3535 /* Compute the ipad digest */
3536 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3537 memcpy(pad, o_ptr, keys.authkeylen);
3538 for (i = 0; i < bs >> 2; i++)
3539 *((unsigned int *)pad + i) ^= IPAD_DATA;
3540
3541 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3542 max_authsize))
3543 goto out;
3544 /* Compute the opad digest */
3545 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3546 memcpy(pad, o_ptr, keys.authkeylen);
3547 for (i = 0; i < bs >> 2; i++)
3548 *((unsigned int *)pad + i) ^= OPAD_DATA;
3549
3550 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3551 goto out;
3552
3553 /* convert the ipad and opad digest to network order */
3554 chcr_change_order(actx->h_iopad, param.result_size);
3555 chcr_change_order(o_ptr, param.result_size);
3556 key_ctx_len = sizeof(struct _key_ctx) +
3557 roundup(keys.enckeylen, 16) +
3558 (param.result_size + align) * 2;
3559 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3560 0, 1, key_ctx_len >> 4);
3561 actx->auth_mode = param.auth_mode;
3562 chcr_free_shash(base_hash);
3563
3564 memzero_explicit(&keys, sizeof(keys));
3565 return 0;
3566 }
3567 out:
3568 aeadctx->enckey_len = 0;
3569 memzero_explicit(&keys, sizeof(keys));
3570 if (!IS_ERR(base_hash))
3571 chcr_free_shash(base_hash);
3572 return -EINVAL;
3573 }
3574
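/* Setkey handler for the authenc(digest_null,...) templates: only the cipher
 * key is programmed. The RFC3686 nonce is split off for the CTR variants, a
 * decrypt round key is derived for the CBC variants, and the key context is
 * built with CHCR_KEYCTX_NO_KEY and auth mode NOP.
 */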
3575 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3576 const u8 *key, unsigned int keylen)
3577 {
3578 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3579 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3580 /* the supplied key blob carries both the auth and the cipher key */
3581 struct crypto_authenc_keys keys;
3582 int err;
3583 unsigned int subtype;
3584 int key_ctx_len = 0;
3585 unsigned char ck_size = 0;
3586
3587 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3588 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3589 & CRYPTO_TFM_REQ_MASK);
3590 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3591 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3592 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3593 & CRYPTO_TFM_RES_MASK);
3594 if (err)
3595 goto out;
3596
3597 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3598 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3599 goto out;
3600 }
3601 subtype = get_aead_subtype(authenc);
3602 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3603 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3604 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3605 goto out;
3606 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3607 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3608 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3609 }
3610 if (keys.enckeylen == AES_KEYSIZE_128) {
3611 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3612 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3613 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3614 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3615 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3616 } else {
3617 pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3618 goto out;
3619 }
3620 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3621 aeadctx->enckey_len = keys.enckeylen;
3622 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3623 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3624 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3625 aeadctx->enckey_len << 3);
3626 }
3627 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3628
3629 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3630 0, key_ctx_len >> 4);
3631 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3632 memzero_explicit(&keys, sizeof(keys));
3633 return 0;
3634 out:
3635 aeadctx->enckey_len = 0;
3636 memzero_explicit(&keys, sizeof(keys));
3637 return -EINVAL;
3638 }
3639
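/* Common AEAD submission path: validate the device, take a work-request
 * reference (falling back to software if the device is detaching), honour
 * queue-full backpressure, then build the work request via create_wr_fn and
 * hand it to the transmit queue.
 */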
3640 static int chcr_aead_op(struct aead_request *req,
3641 int size,
3642 create_wr_t create_wr_fn)
3643 {
3644 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3645 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3646 struct uld_ctx *u_ctx;
3647 struct sk_buff *skb;
3648 int isfull = 0;
3649 struct chcr_dev *cdev;
3650
3651 cdev = a_ctx(tfm)->dev;
3652 if (!cdev) {
3653 pr_err("chcr : %s : No crypto device.\n", __func__);
3654 return -ENXIO;
3655 }
3656
3657 if (chcr_inc_wrcount(cdev)) {
3658 /* Detach state for CHCR means lldi or padap is freed;
3659 * service the request through the software fallback instead.
3660 */
3661 return chcr_aead_fallback(req, reqctx->op);
3662 }
3663
3664 u_ctx = ULD_CTX(a_ctx(tfm));
3665 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3666 a_ctx(tfm)->tx_qidx)) {
3667 isfull = 1;
3668 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3669 chcr_dec_wrcount(cdev);
3670 return -ENOSPC;
3671 }
3672 }
3673
3674 /* Form a WR from req */
3675 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
3676
3677 if (IS_ERR_OR_NULL(skb)) {
3678 chcr_dec_wrcount(cdev);
3679 return PTR_ERR_OR_ZERO(skb);
3680 }
3681
3682 skb->dev = u_ctx->lldi.ports[0];
3683 set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3684 chcr_send_wr(skb);
3685 return isfull ? -EBUSY : -EINPROGRESS;
3686 }
3687
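/* Encrypt entry point: dispatch to the work-request builder that matches the
 * AEAD subtype (authenc, CCM/RFC4309 or GCM/RFC4106).
 */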
3688 static int chcr_aead_encrypt(struct aead_request *req)
3689 {
3690 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3691 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3692
3693 reqctx->verify = VERIFY_HW;
3694 reqctx->op = CHCR_ENCRYPT_OP;
3695
3696 switch (get_aead_subtype(tfm)) {
3697 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3698 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3699 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3700 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3701 return chcr_aead_op(req, 0, create_authenc_wr);
3702 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3703 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3704 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3705 default:
3706 return chcr_aead_op(req, 0, create_gcm_wr);
3707 }
3708 }
3709
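/* Decrypt entry point: if the context asks for software tag verification
 * (mayverify == VERIFY_SW), reserve room for the maximum auth size so the
 * driver can check the tag itself; otherwise the hardware verifies it. Then
 * dispatch to the matching work-request builder.
 */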
3710 static int chcr_aead_decrypt(struct aead_request *req)
3711 {
3712 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3713 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3714 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3715 int size;
3716
3717 if (aeadctx->mayverify == VERIFY_SW) {
3718 size = crypto_aead_maxauthsize(tfm);
3719 reqctx->verify = VERIFY_SW;
3720 } else {
3721 size = 0;
3722 reqctx->verify = VERIFY_HW;
3723 }
3724 reqctx->op = CHCR_DECRYPT_OP;
3725 switch (get_aead_subtype(tfm)) {
3726 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3727 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3728 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3729 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3730 return chcr_aead_op(req, size, create_authenc_wr);
3731 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3732 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3733 return chcr_aead_op(req, size, create_aead_ccm_wr);
3734 default:
3735 return chcr_aead_op(req, size, create_gcm_wr);
3736 }
3737 }
3738
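/* driver_algs - templates for every transform this driver exposes: AES block
 * ciphers (cbc, xts, ctr, rfc3686), SHA-1/SHA-2 hashes and their HMACs, and
 * the GCM/CCM/authenc AEAD modes. chcr_register_alg() fills in the common
 * callbacks before handing each entry to the crypto API.
 */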
3739 static struct chcr_alg_template driver_algs[] = {
3740 /* AES-CBC */
3741 {
3742 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3743 .is_registered = 0,
3744 .alg.crypto = {
3745 .cra_name = "cbc(aes)",
3746 .cra_driver_name = "cbc-aes-chcr",
3747 .cra_blocksize = AES_BLOCK_SIZE,
3748 .cra_init = chcr_cra_init,
3749 .cra_exit = chcr_cra_exit,
3750 .cra_u.ablkcipher = {
3751 .min_keysize = AES_MIN_KEY_SIZE,
3752 .max_keysize = AES_MAX_KEY_SIZE,
3753 .ivsize = AES_BLOCK_SIZE,
3754 .setkey = chcr_aes_cbc_setkey,
3755 .encrypt = chcr_aes_encrypt,
3756 .decrypt = chcr_aes_decrypt,
3757 }
3758 }
3759 },
3760 {
3761 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3762 .is_registered = 0,
3763 .alg.crypto = {
3764 .cra_name = "xts(aes)",
3765 .cra_driver_name = "xts-aes-chcr",
3766 .cra_blocksize = AES_BLOCK_SIZE,
3767 .cra_init = chcr_cra_init,
3768 .cra_exit = NULL,
3769 .cra_u.ablkcipher = {
3770 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3771 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3772 .ivsize = AES_BLOCK_SIZE,
3773 .setkey = chcr_aes_xts_setkey,
3774 .encrypt = chcr_aes_encrypt,
3775 .decrypt = chcr_aes_decrypt,
3776 }
3777 }
3778 },
3779 {
3780 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3781 .is_registered = 0,
3782 .alg.crypto = {
3783 .cra_name = "ctr(aes)",
3784 .cra_driver_name = "ctr-aes-chcr",
3785 .cra_blocksize = 1,
3786 .cra_init = chcr_cra_init,
3787 .cra_exit = chcr_cra_exit,
3788 .cra_u.ablkcipher = {
3789 .min_keysize = AES_MIN_KEY_SIZE,
3790 .max_keysize = AES_MAX_KEY_SIZE,
3791 .ivsize = AES_BLOCK_SIZE,
3792 .setkey = chcr_aes_ctr_setkey,
3793 .encrypt = chcr_aes_encrypt,
3794 .decrypt = chcr_aes_decrypt,
3795 }
3796 }
3797 },
3798 {
3799 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3800 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3801 .is_registered = 0,
3802 .alg.crypto = {
3803 .cra_name = "rfc3686(ctr(aes))",
3804 .cra_driver_name = "rfc3686-ctr-aes-chcr",
3805 .cra_blocksize = 1,
3806 .cra_init = chcr_rfc3686_init,
3807 .cra_exit = chcr_cra_exit,
3808 .cra_u.ablkcipher = {
3809 .min_keysize = AES_MIN_KEY_SIZE +
3810 CTR_RFC3686_NONCE_SIZE,
3811 .max_keysize = AES_MAX_KEY_SIZE +
3812 CTR_RFC3686_NONCE_SIZE,
3813 .ivsize = CTR_RFC3686_IV_SIZE,
3814 .setkey = chcr_aes_rfc3686_setkey,
3815 .encrypt = chcr_aes_encrypt,
3816 .decrypt = chcr_aes_decrypt,
3817 }
3818 }
3819 },
3820 /* SHA */
3821 {
3822 .type = CRYPTO_ALG_TYPE_AHASH,
3823 .is_registered = 0,
3824 .alg.hash = {
3825 .halg.digestsize = SHA1_DIGEST_SIZE,
3826 .halg.base = {
3827 .cra_name = "sha1",
3828 .cra_driver_name = "sha1-chcr",
3829 .cra_blocksize = SHA1_BLOCK_SIZE,
3830 }
3831 }
3832 },
3833 {
3834 .type = CRYPTO_ALG_TYPE_AHASH,
3835 .is_registered = 0,
3836 .alg.hash = {
3837 .halg.digestsize = SHA256_DIGEST_SIZE,
3838 .halg.base = {
3839 .cra_name = "sha256",
3840 .cra_driver_name = "sha256-chcr",
3841 .cra_blocksize = SHA256_BLOCK_SIZE,
3842 }
3843 }
3844 },
3845 {
3846 .type = CRYPTO_ALG_TYPE_AHASH,
3847 .is_registered = 0,
3848 .alg.hash = {
3849 .halg.digestsize = SHA224_DIGEST_SIZE,
3850 .halg.base = {
3851 .cra_name = "sha224",
3852 .cra_driver_name = "sha224-chcr",
3853 .cra_blocksize = SHA224_BLOCK_SIZE,
3854 }
3855 }
3856 },
3857 {
3858 .type = CRYPTO_ALG_TYPE_AHASH,
3859 .is_registered = 0,
3860 .alg.hash = {
3861 .halg.digestsize = SHA384_DIGEST_SIZE,
3862 .halg.base = {
3863 .cra_name = "sha384",
3864 .cra_driver_name = "sha384-chcr",
3865 .cra_blocksize = SHA384_BLOCK_SIZE,
3866 }
3867 }
3868 },
3869 {
3870 .type = CRYPTO_ALG_TYPE_AHASH,
3871 .is_registered = 0,
3872 .alg.hash = {
3873 .halg.digestsize = SHA512_DIGEST_SIZE,
3874 .halg.base = {
3875 .cra_name = "sha512",
3876 .cra_driver_name = "sha512-chcr",
3877 .cra_blocksize = SHA512_BLOCK_SIZE,
3878 }
3879 }
3880 },
3881 /* HMAC */
3882 {
3883 .type = CRYPTO_ALG_TYPE_HMAC,
3884 .is_registered = 0,
3885 .alg.hash = {
3886 .halg.digestsize = SHA1_DIGEST_SIZE,
3887 .halg.base = {
3888 .cra_name = "hmac(sha1)",
3889 .cra_driver_name = "hmac-sha1-chcr",
3890 .cra_blocksize = SHA1_BLOCK_SIZE,
3891 }
3892 }
3893 },
3894 {
3895 .type = CRYPTO_ALG_TYPE_HMAC,
3896 .is_registered = 0,
3897 .alg.hash = {
3898 .halg.digestsize = SHA224_DIGEST_SIZE,
3899 .halg.base = {
3900 .cra_name = "hmac(sha224)",
3901 .cra_driver_name = "hmac-sha224-chcr",
3902 .cra_blocksize = SHA224_BLOCK_SIZE,
3903 }
3904 }
3905 },
3906 {
3907 .type = CRYPTO_ALG_TYPE_HMAC,
3908 .is_registered = 0,
3909 .alg.hash = {
3910 .halg.digestsize = SHA256_DIGEST_SIZE,
3911 .halg.base = {
3912 .cra_name = "hmac(sha256)",
3913 .cra_driver_name = "hmac-sha256-chcr",
3914 .cra_blocksize = SHA256_BLOCK_SIZE,
3915 }
3916 }
3917 },
3918 {
3919 .type = CRYPTO_ALG_TYPE_HMAC,
3920 .is_registered = 0,
3921 .alg.hash = {
3922 .halg.digestsize = SHA384_DIGEST_SIZE,
3923 .halg.base = {
3924 .cra_name = "hmac(sha384)",
3925 .cra_driver_name = "hmac-sha384-chcr",
3926 .cra_blocksize = SHA384_BLOCK_SIZE,
3927 }
3928 }
3929 },
3930 {
3931 .type = CRYPTO_ALG_TYPE_HMAC,
3932 .is_registered = 0,
3933 .alg.hash = {
3934 .halg.digestsize = SHA512_DIGEST_SIZE,
3935 .halg.base = {
3936 .cra_name = "hmac(sha512)",
3937 .cra_driver_name = "hmac-sha512-chcr",
3938 .cra_blocksize = SHA512_BLOCK_SIZE,
3939 }
3940 }
3941 },
3942 /* Add AEAD Algorithms */
3943 {
3944 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3945 .is_registered = 0,
3946 .alg.aead = {
3947 .base = {
3948 .cra_name = "gcm(aes)",
3949 .cra_driver_name = "gcm-aes-chcr",
3950 .cra_blocksize = 1,
3951 .cra_priority = CHCR_AEAD_PRIORITY,
3952 .cra_ctxsize = sizeof(struct chcr_context) +
3953 sizeof(struct chcr_aead_ctx) +
3954 sizeof(struct chcr_gcm_ctx),
3955 },
3956 .ivsize = GCM_AES_IV_SIZE,
3957 .maxauthsize = GHASH_DIGEST_SIZE,
3958 .setkey = chcr_gcm_setkey,
3959 .setauthsize = chcr_gcm_setauthsize,
3960 }
3961 },
3962 {
3963 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3964 .is_registered = 0,
3965 .alg.aead = {
3966 .base = {
3967 .cra_name = "rfc4106(gcm(aes))",
3968 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3969 .cra_blocksize = 1,
3970 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3971 .cra_ctxsize = sizeof(struct chcr_context) +
3972 sizeof(struct chcr_aead_ctx) +
3973 sizeof(struct chcr_gcm_ctx),
3974
3975 },
3976 .ivsize = GCM_RFC4106_IV_SIZE,
3977 .maxauthsize = GHASH_DIGEST_SIZE,
3978 .setkey = chcr_gcm_setkey,
3979 .setauthsize = chcr_4106_4309_setauthsize,
3980 }
3981 },
3982 {
3983 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3984 .is_registered = 0,
3985 .alg.aead = {
3986 .base = {
3987 .cra_name = "ccm(aes)",
3988 .cra_driver_name = "ccm-aes-chcr",
3989 .cra_blocksize = 1,
3990 .cra_priority = CHCR_AEAD_PRIORITY,
3991 .cra_ctxsize = sizeof(struct chcr_context) +
3992 sizeof(struct chcr_aead_ctx),
3993
3994 },
3995 .ivsize = AES_BLOCK_SIZE,
3996 .maxauthsize = GHASH_DIGEST_SIZE,
3997 .setkey = chcr_aead_ccm_setkey,
3998 .setauthsize = chcr_ccm_setauthsize,
3999 }
4000 },
4001 {
4002 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4003 .is_registered = 0,
4004 .alg.aead = {
4005 .base = {
4006 .cra_name = "rfc4309(ccm(aes))",
4007 .cra_driver_name = "rfc4309-ccm-aes-chcr",
4008 .cra_blocksize = 1,
4009 .cra_priority = CHCR_AEAD_PRIORITY + 1,
4010 .cra_ctxsize = sizeof(struct chcr_context) +
4011 sizeof(struct chcr_aead_ctx),
4012
4013 },
4014 .ivsize = 8,
4015 .maxauthsize = GHASH_DIGEST_SIZE,
4016 .setkey = chcr_aead_rfc4309_setkey,
4017 .setauthsize = chcr_4106_4309_setauthsize,
4018 }
4019 },
4020 {
4021 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4022 .is_registered = 0,
4023 .alg.aead = {
4024 .base = {
4025 .cra_name = "authenc(hmac(sha1),cbc(aes))",
4026 .cra_driver_name =
4027 "authenc-hmac-sha1-cbc-aes-chcr",
4028 .cra_blocksize = AES_BLOCK_SIZE,
4029 .cra_priority = CHCR_AEAD_PRIORITY,
4030 .cra_ctxsize = sizeof(struct chcr_context) +
4031 sizeof(struct chcr_aead_ctx) +
4032 sizeof(struct chcr_authenc_ctx),
4033
4034 },
4035 .ivsize = AES_BLOCK_SIZE,
4036 .maxauthsize = SHA1_DIGEST_SIZE,
4037 .setkey = chcr_authenc_setkey,
4038 .setauthsize = chcr_authenc_setauthsize,
4039 }
4040 },
4041 {
4042 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4043 .is_registered = 0,
4044 .alg.aead = {
4045 .base = {
4046
4047 .cra_name = "authenc(hmac(sha256),cbc(aes))",
4048 .cra_driver_name =
4049 "authenc-hmac-sha256-cbc-aes-chcr",
4050 .cra_blocksize = AES_BLOCK_SIZE,
4051 .cra_priority = CHCR_AEAD_PRIORITY,
4052 .cra_ctxsize = sizeof(struct chcr_context) +
4053 sizeof(struct chcr_aead_ctx) +
4054 sizeof(struct chcr_authenc_ctx),
4055
4056 },
4057 .ivsize = AES_BLOCK_SIZE,
4058 .maxauthsize = SHA256_DIGEST_SIZE,
4059 .setkey = chcr_authenc_setkey,
4060 .setauthsize = chcr_authenc_setauthsize,
4061 }
4062 },
4063 {
4064 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4065 .is_registered = 0,
4066 .alg.aead = {
4067 .base = {
4068 .cra_name = "authenc(hmac(sha224),cbc(aes))",
4069 .cra_driver_name =
4070 "authenc-hmac-sha224-cbc-aes-chcr",
4071 .cra_blocksize = AES_BLOCK_SIZE,
4072 .cra_priority = CHCR_AEAD_PRIORITY,
4073 .cra_ctxsize = sizeof(struct chcr_context) +
4074 sizeof(struct chcr_aead_ctx) +
4075 sizeof(struct chcr_authenc_ctx),
4076 },
4077 .ivsize = AES_BLOCK_SIZE,
4078 .maxauthsize = SHA224_DIGEST_SIZE,
4079 .setkey = chcr_authenc_setkey,
4080 .setauthsize = chcr_authenc_setauthsize,
4081 }
4082 },
4083 {
4084 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4085 .is_registered = 0,
4086 .alg.aead = {
4087 .base = {
4088 .cra_name = "authenc(hmac(sha384),cbc(aes))",
4089 .cra_driver_name =
4090 "authenc-hmac-sha384-cbc-aes-chcr",
4091 .cra_blocksize = AES_BLOCK_SIZE,
4092 .cra_priority = CHCR_AEAD_PRIORITY,
4093 .cra_ctxsize = sizeof(struct chcr_context) +
4094 sizeof(struct chcr_aead_ctx) +
4095 sizeof(struct chcr_authenc_ctx),
4096
4097 },
4098 .ivsize = AES_BLOCK_SIZE,
4099 .maxauthsize = SHA384_DIGEST_SIZE,
4100 .setkey = chcr_authenc_setkey,
4101 .setauthsize = chcr_authenc_setauthsize,
4102 }
4103 },
4104 {
4105 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4106 .is_registered = 0,
4107 .alg.aead = {
4108 .base = {
4109 .cra_name = "authenc(hmac(sha512),cbc(aes))",
4110 .cra_driver_name =
4111 "authenc-hmac-sha512-cbc-aes-chcr",
4112 .cra_blocksize = AES_BLOCK_SIZE,
4113 .cra_priority = CHCR_AEAD_PRIORITY,
4114 .cra_ctxsize = sizeof(struct chcr_context) +
4115 sizeof(struct chcr_aead_ctx) +
4116 sizeof(struct chcr_authenc_ctx),
4117
4118 },
4119 .ivsize = AES_BLOCK_SIZE,
4120 .maxauthsize = SHA512_DIGEST_SIZE,
4121 .setkey = chcr_authenc_setkey,
4122 .setauthsize = chcr_authenc_setauthsize,
4123 }
4124 },
4125 {
4126 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4127 .is_registered = 0,
4128 .alg.aead = {
4129 .base = {
4130 .cra_name = "authenc(digest_null,cbc(aes))",
4131 .cra_driver_name =
4132 "authenc-digest_null-cbc-aes-chcr",
4133 .cra_blocksize = AES_BLOCK_SIZE,
4134 .cra_priority = CHCR_AEAD_PRIORITY,
4135 .cra_ctxsize = sizeof(struct chcr_context) +
4136 sizeof(struct chcr_aead_ctx) +
4137 sizeof(struct chcr_authenc_ctx),
4138
4139 },
4140 .ivsize = AES_BLOCK_SIZE,
4141 .maxauthsize = 0,
4142 .setkey = chcr_aead_digest_null_setkey,
4143 .setauthsize = chcr_authenc_null_setauthsize,
4144 }
4145 },
4146 {
4147 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4148 .is_registered = 0,
4149 .alg.aead = {
4150 .base = {
4151 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4152 .cra_driver_name =
4153 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4154 .cra_blocksize = 1,
4155 .cra_priority = CHCR_AEAD_PRIORITY,
4156 .cra_ctxsize = sizeof(struct chcr_context) +
4157 sizeof(struct chcr_aead_ctx) +
4158 sizeof(struct chcr_authenc_ctx),
4159
4160 },
4161 .ivsize = CTR_RFC3686_IV_SIZE,
4162 .maxauthsize = SHA1_DIGEST_SIZE,
4163 .setkey = chcr_authenc_setkey,
4164 .setauthsize = chcr_authenc_setauthsize,
4165 }
4166 },
4167 {
4168 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4169 .is_registered = 0,
4170 .alg.aead = {
4171 .base = {
4172
4173 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4174 .cra_driver_name =
4175 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4176 .cra_blocksize = 1,
4177 .cra_priority = CHCR_AEAD_PRIORITY,
4178 .cra_ctxsize = sizeof(struct chcr_context) +
4179 sizeof(struct chcr_aead_ctx) +
4180 sizeof(struct chcr_authenc_ctx),
4181
4182 },
4183 .ivsize = CTR_RFC3686_IV_SIZE,
4184 .maxauthsize = SHA256_DIGEST_SIZE,
4185 .setkey = chcr_authenc_setkey,
4186 .setauthsize = chcr_authenc_setauthsize,
4187 }
4188 },
4189 {
4190 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4191 .is_registered = 0,
4192 .alg.aead = {
4193 .base = {
4194 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4195 .cra_driver_name =
4196 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4197 .cra_blocksize = 1,
4198 .cra_priority = CHCR_AEAD_PRIORITY,
4199 .cra_ctxsize = sizeof(struct chcr_context) +
4200 sizeof(struct chcr_aead_ctx) +
4201 sizeof(struct chcr_authenc_ctx),
4202 },
4203 .ivsize = CTR_RFC3686_IV_SIZE,
4204 .maxauthsize = SHA224_DIGEST_SIZE,
4205 .setkey = chcr_authenc_setkey,
4206 .setauthsize = chcr_authenc_setauthsize,
4207 }
4208 },
4209 {
4210 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4211 .is_registered = 0,
4212 .alg.aead = {
4213 .base = {
4214 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4215 .cra_driver_name =
4216 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4217 .cra_blocksize = 1,
4218 .cra_priority = CHCR_AEAD_PRIORITY,
4219 .cra_ctxsize = sizeof(struct chcr_context) +
4220 sizeof(struct chcr_aead_ctx) +
4221 sizeof(struct chcr_authenc_ctx),
4222
4223 },
4224 .ivsize = CTR_RFC3686_IV_SIZE,
4225 .maxauthsize = SHA384_DIGEST_SIZE,
4226 .setkey = chcr_authenc_setkey,
4227 .setauthsize = chcr_authenc_setauthsize,
4228 }
4229 },
4230 {
4231 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4232 .is_registered = 0,
4233 .alg.aead = {
4234 .base = {
4235 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4236 .cra_driver_name =
4237 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4238 .cra_blocksize = 1,
4239 .cra_priority = CHCR_AEAD_PRIORITY,
4240 .cra_ctxsize = sizeof(struct chcr_context) +
4241 sizeof(struct chcr_aead_ctx) +
4242 sizeof(struct chcr_authenc_ctx),
4243
4244 },
4245 .ivsize = CTR_RFC3686_IV_SIZE,
4246 .maxauthsize = SHA512_DIGEST_SIZE,
4247 .setkey = chcr_authenc_setkey,
4248 .setauthsize = chcr_authenc_setauthsize,
4249 }
4250 },
4251 {
4252 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4253 .is_registered = 0,
4254 .alg.aead = {
4255 .base = {
4256 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4257 .cra_driver_name =
4258 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4259 .cra_blocksize = 1,
4260 .cra_priority = CHCR_AEAD_PRIORITY,
4261 .cra_ctxsize = sizeof(struct chcr_context) +
4262 sizeof(struct chcr_aead_ctx) +
4263 sizeof(struct chcr_authenc_ctx),
4264
4265 },
4266 .ivsize = CTR_RFC3686_IV_SIZE,
4267 .maxauthsize = 0,
4268 .setkey = chcr_aead_digest_null_setkey,
4269 .setauthsize = chcr_authenc_null_setauthsize,
4270 }
4271 },
4272 };
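
/* Illustration only (not part of this driver): once these entries are
 * registered, any kernel user can reach the hardware through the generic
 * crypto API, picking the transform by cra_name. A rough sketch, assuming
 * key/keylen and the request plumbing are provided by the caller:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setkey(tfm, key, keylen);
 *		crypto_aead_setauthsize(tfm, 16);
 *		// allocate an aead_request, set src/dst scatterlists and
 *		// the IV, then call crypto_aead_encrypt()/_decrypt()
 *		crypto_free_aead(tfm);
 *	}
 *
 * Whether "gcm-aes-chcr" is selected depends on cra_priority relative to
 * other registered gcm(aes) implementations.
 */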
4273
4274 /*
4275 * chcr_unregister_alg - Deregister crypto algorithms with the
4276 * kernel framework.
4277 */
4278 static int chcr_unregister_alg(void)
4279 {
4280 int i;
4281
4282 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4283 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4284 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4285 if (driver_algs[i].is_registered)
4286 crypto_unregister_alg(
4287 &driver_algs[i].alg.crypto);
4288 break;
4289 case CRYPTO_ALG_TYPE_AEAD:
4290 if (driver_algs[i].is_registered)
4291 crypto_unregister_aead(
4292 &driver_algs[i].alg.aead);
4293 break;
4294 case CRYPTO_ALG_TYPE_AHASH:
4295 if (driver_algs[i].is_registered)
4296 crypto_unregister_ahash(
4297 &driver_algs[i].alg.hash);
4298 break;
4299 }
4300 driver_algs[i].is_registered = 0;
4301 }
4302 return 0;
4303 }
4304
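/* Context sizes used when registering the ahash entries: plain hashes need
 * only the chcr_context, HMACs additionally carry an hmac_ctx, and the
 * request context size doubles as the export/import state size.
 */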
4305 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4306 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4307 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4308
4309 /*
4310 * chcr_register_alg - Register crypto algorithms with kernel framework.
4311 */
4312 static int chcr_register_alg(void)
4313 {
4314 struct crypto_alg ai;
4315 struct ahash_alg *a_hash;
4316 int err = 0, i;
4317 char *name = NULL;
4318
4319 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4320 if (driver_algs[i].is_registered)
4321 continue;
4322 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4323 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4324 driver_algs[i].alg.crypto.cra_priority =
4325 CHCR_CRA_PRIORITY;
4326 driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4327 driver_algs[i].alg.crypto.cra_flags =
4328 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4329 CRYPTO_ALG_NEED_FALLBACK;
4330 driver_algs[i].alg.crypto.cra_ctxsize =
4331 sizeof(struct chcr_context) +
4332 sizeof(struct ablk_ctx);
4333 driver_algs[i].alg.crypto.cra_alignmask = 0;
4334 driver_algs[i].alg.crypto.cra_type =
4335 &crypto_ablkcipher_type;
4336 err = crypto_register_alg(&driver_algs[i].alg.crypto);
4337 name = driver_algs[i].alg.crypto.cra_driver_name;
4338 break;
4339 case CRYPTO_ALG_TYPE_AEAD:
4340 driver_algs[i].alg.aead.base.cra_flags =
4341 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4342 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4343 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4344 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4345 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4346 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4347 err = crypto_register_aead(&driver_algs[i].alg.aead);
4348 name = driver_algs[i].alg.aead.base.cra_driver_name;
4349 break;
4350 case CRYPTO_ALG_TYPE_AHASH:
4351 a_hash = &driver_algs[i].alg.hash;
4352 a_hash->update = chcr_ahash_update;
4353 a_hash->final = chcr_ahash_final;
4354 a_hash->finup = chcr_ahash_finup;
4355 a_hash->digest = chcr_ahash_digest;
4356 a_hash->export = chcr_ahash_export;
4357 a_hash->import = chcr_ahash_import;
4358 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4359 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4360 a_hash->halg.base.cra_module = THIS_MODULE;
4361 a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4362 a_hash->halg.base.cra_alignmask = 0;
4363 a_hash->halg.base.cra_exit = NULL;
4364
4365 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4366 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4367 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4368 a_hash->init = chcr_hmac_init;
4369 a_hash->setkey = chcr_ahash_setkey;
4370 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4371 } else {
4372 a_hash->init = chcr_sha_init;
4373 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4374 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4375 }
4376 err = crypto_register_ahash(&driver_algs[i].alg.hash);
4377 ai = driver_algs[i].alg.hash.halg.base;
4378 name = ai.cra_driver_name;
4379 break;
4380 }
4381 if (err) {
4382 pr_err("chcr : %s : Algorithm registration failed\n",
4383 name);
4384 goto register_err;
4385 } else {
4386 driver_algs[i].is_registered = 1;
4387 }
4388 }
4389 return 0;
4390
4391 register_err:
4392 chcr_unregister_alg();
4393 return err;
4394 }
4395
4396 /*
4397 * start_crypto - Register the crypto algorithms.
4398 * This should be called once when the first device comes up. After this
4399 * the kernel will start calling driver APIs for crypto operations.
4400 */
4401 int start_crypto(void)
4402 {
4403 return chcr_register_alg();
4404 }
4405
4406 /*
4407 * stop_crypto - Deregister all the crypto algorithms with the kernel.
4408 * This should be called once when the last device goes down. After this
4409 * the kernel will not call the driver API for crypto operations.
4410 */
4411 int stop_crypto(void)
4412 {
4413 chcr_unregister_alg();
4414 return 0;
4415 }