/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
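/*
 * Precomputed work-request space, in bytes, consumed by a source ULPTX
 * SGL (sgl_ent_len) and a destination PHYS_DSGL (dsgl_ent_len) with the
 * indexed number of entries; used below when deciding how much data
 * fits into a single work request.
 */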
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};
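/* AES key-schedule round constants (Rcon), stored in the high byte. */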
static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
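/*
 * Count the hardware SG entries needed to cover @reqlen bytes of @sg,
 * skipping the first @skip bytes and splitting each DMA segment into
 * chunks of at most @entlen bytes.
 */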
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}
static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);

	return err;
}
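/*
 * Derive the reverse-round ("decrypt") key from an AES encryption key:
 * run the standard key expansion and emit the last Nk schedule words in
 * reverse order, which is the form the hardware expects for decryption.
 */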
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = get_unaligned_be32(&key[i * 4]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord, SubWord, then XOR the round constant */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
		j--;
		if (j < 0)
			j += nk;
	}
}
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}
static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
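/*
 * The dsgl_walk_* helpers below incrementally build the CPL_RX_PHYS_DSGL
 * destination gather list (the address/length pairs the chip DMA-writes
 * results into); the ulptx_walk_* helpers further down do the same for
 * the ULPTX source gather list.
 */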
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size, dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents += 1;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}
static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_sync_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
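/*
 * Each request stores the tx/rx queue indices it was assigned; fetch
 * them from the algorithm-specific request context so create_wreq()
 * can route the work request and its response to the right queues.
 */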
static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		break;
	}
	return ret;
}
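/*
 * Fill the FW_CRYPTO_LOOKASIDE work-request header common to all
 * cipher/hash/AEAD WRs: opcode and sizes (in 16-byte units), the
 * completion cookie carrying the request pointer, and the tx/rx queue
 * routing fields.
 */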
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	unsigned int tx_channel_id, rx_channel_id;
	unsigned int txqidx = 0, rxqidx = 0;
	unsigned int qid, fid;

	get_qidxs(req, &txqidx, &rxqidx);
	qid = u_ctx->lldi.rxq_ids[rxqidx];
	fid = u_ctx->lldi.rxq_ids[0];
	tx_channel_id = txqidx / ctx->txq_perchan;
	rx_channel_id = rxqidx / ctx->rxq_perchan;

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
							    !!lcb, txqidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));
	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: cipher WR parameters, carrying the request,
 *	@ctx: crypto driver context of the request,
 *	@qid: ingress qid where the response of this WR should be received, and
 *	@op_type: encryption or decryption
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;

	return ck_size;
}
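/*
 * Keep the software fallback cipher keyed identically to the hardware,
 * so requests that cannot complete on the device (e.g. after a counter
 * overflow) can be finished transparently in software.
 */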
static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
				       cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
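/*
 * Treat the 16-byte IV as a 128-bit big-endian integer and add @add to
 * it, propagating the carry word by word from the least-significant
 * (rightmost) 32 bits.
 */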
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
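/*
 * The hardware increments only a 32-bit counter word, so at most
 * (2^32 - current_counter) blocks fit before the low word wraps; clamp
 * @bytes so one work request never crosses that boundary.
 */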
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) >= c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_aes_ctx aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	/* For a 192 bit key remove the padded zeroes which were
	 * added in chcr_xts_setkey
	 */
	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
	    == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
		ret = aes_expandkey(&aes, key, keylen - 8);
	else
		ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
static int chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending the last WR */
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}
static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct chcr_context *ctx = c_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);
	struct cipher_wr_param wrparam;
	struct sk_buff *skb;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		memcpy(req->iv, reqctx->init_iv, IV);
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->cryptlen,
					   req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
	return err;
}
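/*
 * Validate and DMA-map the request, then size the first work request:
 * small payloads are sent inline ("immediate"), larger ones are
 * described by source/destination SGLs and may span several WRs.
 */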
static int process_cipher(struct skcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct adapter *adap = padap(c_ctx(tfm)->dev);
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;
	int subtype;

	reqctx->processed = 0;
	reqctx->partial_req = 0;
	if (!req->iv)
		goto error;
	subtype = get_cryptoalg_subtype(tfm);
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					       AES_MIN_KEY_SIZE +
					       sizeof(struct cpl_rx_phys_dsgl) +
					       /* Min dsgl size */
					       32))) {
		/* Can be sent as Imm */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;
	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->iv, bytes);
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
		memcpy(reqctx->init_iv, reqctx->iv, IV);
	} else {
		memcpy(reqctx->iv, req->iv, IV);
		memcpy(reqctx->init_iv, req->iv, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
fallback:	atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->cryptlen,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;
	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}
static int chcr_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		err = -ENOSPC;
		goto error;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		reqctx->partial_req = 1;
		wait_for_completion(&ctx->cbc_aes_aio_done);
	}
	return -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}
static int chcr_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
		return -ENOSPC;
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	int txq_perchan, ntxq;
	int err = 0, rxq_perchan;

	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		ctx->ntxq = ntxq;
		ctx->nrxq = u_ctx->lldi.nrxq;
		ctx->rxq_perchan = rxq_perchan;
		ctx->txq_perchan = txq_perchan;
	}
out:
	return err;
}
static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
							CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	init_completion(&ctx->cbc_aes_aio_done);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));

	return chcr_device_init(ctx);
}
static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_response
	 */
	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
							CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
	return chcr_device_init(ctx);
}
static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_sync_skcipher(ablkctx->sw_cipher);
}
static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}
/**
 *	create_hash_wr - Create hash work request
 *	@req: Cipher req base
 *	@param: hash work-request parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;
	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);

	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
						     param->alg_prm.mk_size, 0,
						     param->opad_needed,
						     ((param->kctx_len +
						      sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
			(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* Detach state for CHCR means lldi or padap is freed. Increasing
	 * the inflight count for dev guarantees that lldi and padap stay valid
	 */
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
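/*
 * Build the final hash padding block by hand when nothing is buffered:
 * a zeroed block starting with the 0x80 pad byte and ending with the
 * total message length in bits, stored big-endian in the last 8 bytes
 * (offset 56 for 64-byte blocks, 120 for 128-byte blocks).
 */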
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	chcr_init_hctx_per_wr(req_ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
			       params.sg_len;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;
	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(rtfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}
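/*
 * Completion path for hash WRs: copy the digest to req->result when the
 * last WR of the request finishes; otherwise stash the partial hash and
 * issue the next WR via chcr_ahash_continue().
 */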
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_dev *dev = h_ctx(tfm)->dev;

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
}
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_SKCIPHER:
		chcr_handle_cipher_resp(skcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(req_ctx);
	return 0;
}
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* Use the key to calculate the ipad and opad. ipad will be sent with
	 * the first request's data; opad will be sent with the final hash
	 * result. ipad lives in hmacctx->ipad and opad in hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
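/* Background for the precomputation above: per RFC 2104,
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), where ipad is the
 * byte 0x36 and opad the byte 0x5c repeated across one hash block.
 * IPAD_DATA/OPAD_DATA replicate those pad bytes across a 32-bit word,
 * so the XOR loop masks a whole block one word at a time. Storing the
 * partially hashed ipad/opad states lets the hardware resume the inner
 * and outer hashes without ever seeing the raw key.
 */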
static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	/* Both keys for xts must be aligned to 16 byte boundary
	 * by padding with zeros. So for 24 byte keys padding 8 zeroes.
	 */
	if (key_len == 48) {
		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
				+ 16) >> 4;
		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
		memset(ablkctx->key + 24, 0, 8);
		memset(ablkctx->key + 56, 0, 8);
		ablkctx->enckey_len = 64;
		ablkctx->key_ctx_hdr =
			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
					 CHCR_KEYCTX_NO_KEY, 1,
					 0, context_size);
	} else {
		ablkctx->key_ctx_hdr =
		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				 CHCR_KEYCTX_NO_KEY, 1,
				 0, context_size);
	}
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
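/* Worked layout for the 48-byte (2 x AES-192) XTS case handled above:
 * bytes 0-23 hold key1, bytes 24-31 are zero padding, bytes 32-55 hold
 * key2 (moved up by the memmove) and bytes 56-63 are zero padding, so
 * each half of the XTS key pair occupies an aligned 32-byte slot in the
 * hardware key context.
 */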
static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);

	return 0;
}
static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}
inline void chcr_aead_common_exit(struct aead_request *req)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
}
static int chcr_aead_common_init(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;

	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;
	if (reqctx->op && req->cryptlen < authsize)
		goto err;
	if (reqctx->b0_len)
		reqctx->scratch_pad = reqctx->iv + IV;
	else
		reqctx->scratch_pad = NULL;

	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				  reqctx->op);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	return 0;
err:
	return error;
}
static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
				   int aadmax, int wrlen,
				   unsigned short op_type)
{
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
	    dst_nents > MAX_DSGL_ENT ||
	    (req->assoclen > aadmax) ||
	    (wrlen > SGE_MAX_WR_LEN))
		return 1;
	return 0;
}
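/* A request is bounced to the software fallback when there is nothing
 * to cipher (cryptlen minus the tag on decrypt is zero), the
 * destination needs more DSGL entries than MAX_DSGL_ENT, the AAD
 * exceeds what the unit accepts (aadmax), or the work request would
 * overflow SGE_MAX_WR_LEN.
 */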
static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
		crypto_aead_encrypt(subreq);
}
static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
	unsigned int kctx_len = 0, dnents, snents;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	if (req->cryptlen == 0)
		return NULL;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		null = 1;
	}
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
	dnents += MIN_AUTH_SG; // For IV
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
			: (sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * Input order is AAD, IV and payload, where the IV should be
	 * included as part of the authdata. All other fields should be
	 * filled according to the hardware spec.
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					null ? 0 : 1 + IV,
					null ? 0 : IV + req->assoclen,
					req->assoclen + IV + 1,
					(temp & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					temp & 0xF,
					null ? 0 : req->assoclen + IV + 1,
					temp, temp);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
	else
		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
					temp,
					actx->auth_mode, aeadctx->hmac_ctrl,
					IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					 0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (reqctx->op == CHCR_ENCRYPT_OP ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);
		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(ivptr, req->iv, IV);
	}
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, 0);

	return skb;
err:
	chcr_aead_common_exit(req);

	return ERR_PTR(error);
}
int chcr_aead_dma_map(struct device *dev,
		      struct aead_request *req,
		      unsigned short op_type)
{
	int error;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
						-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return 0;
	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, reqctx->iv_dma))
		return -ENOMEM;
	if (reqctx->b0_len)
		reqctx->b0_dma = reqctx->iv_dma + IV;
	else
		reqctx->b0_dma = 0;
	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src,
				   sg_nents_for_len(req->src, dst_size),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
	return -ENOMEM;
}
void chcr_aead_dma_unmap(struct device *dev,
			 struct aead_request *req,
			 unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
						-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return;

	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src,
			     sg_nents_for_len(req->src, dst_size),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}
void chcr_add_aead_src_ent(struct aead_request *req,
			   struct ulptx_sgl *ulptx)
{
	struct ulptx_walk ulp_walk;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	if (reqctx->imm) {
		u8 *buf = (u8 *)ulptx;

		if (reqctx->b0_len) {
			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
			buf += reqctx->b0_len;
		}
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, req->cryptlen + req->assoclen, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (reqctx->b0_len)
			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
					    reqctx->b0_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
				  req->assoclen, 0);
		ulptx_walk_end(&ulp_walk);
	}
}
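/* Small requests are copied inline ("immediate" data) straight into the
 * work request buffer; larger ones are described by a ULPTX
 * scatter-gather list so the hardware DMAs the payload itself. The same
 * immediate-versus-SGL split recurs in the dst/cipher/hash variants
 * below.
 */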
void chcr_add_aead_dst_ent(struct aead_request *req,
			   struct cpl_rx_phys_dsgl *phys_cpl,
			   unsigned short qid)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct dsgl_walk dsgl_walk;
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct chcr_context *ctx = a_ctx(tfm);
	u32 temp;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
	temp = req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}
void chcr_add_cipher_src_ent(struct skcipher_request *req,
			     void *ulptx,
			     struct cipher_wr_param *wrparam)
{
	struct ulptx_walk ulp_walk;
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	u8 *buf = ulptx;

	memcpy(buf, reqctx->iv, IV);
	buf += IV;
	if (reqctx->imm) {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, wrparam->bytes, reqctx->processed);
	} else {
		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
				  reqctx->src_ofst);
		reqctx->srcsg = ulp_walk.last_sg;
		reqctx->src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}
void chcr_add_cipher_dst_ent(struct skcipher_request *req,
			     struct cpl_rx_phys_dsgl *phys_cpl,
			     struct cipher_wr_param *wrparam,
			     unsigned short qid)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct dsgl_walk dsgl_walk;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
	reqctx->dstsg = dsgl_walk.last_sg;
	reqctx->dst_ofst = dsgl_walk.last_sg_len;
	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}
void chcr_add_hash_src_ent(struct ahash_request *req,
			   struct ulptx_sgl *ulptx,
			   struct hash_wr_param *param)
{
	struct ulptx_walk ulp_walk;
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);

	if (reqctx->hctx_wr.imm) {
		u8 *buf = (u8 *)ulptx;

		if (param->bfr_len) {
			memcpy(buf, reqctx->reqbfr, param->bfr_len);
			buf += param->bfr_len;
		}

		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
				   sg_nents(reqctx->hctx_wr.srcsg), buf,
				   param->sg_len, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (param->bfr_len)
			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
					    reqctx->hctx_wr.dma_addr);
		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
				  param->sg_len, reqctx->hctx_wr.src_ofst);
		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}
int chcr_hash_dma_map(struct device *dev,
		      struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	int error = 0;

	if (!req->nbytes)
		return 0;
	error = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_TO_DEVICE);
	if (!error)
		return -ENOMEM;
	req_ctx->hctx_wr.is_sg_map = 1;
	return 0;
}
void chcr_hash_dma_unmap(struct device *dev,
			 struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return;

	dma_unmap_sg(dev, req->src, sg_nents(req->src),
		     DMA_TO_DEVICE);
	req_ctx->hctx_wr.is_sg_map = 0;
}
int chcr_cipher_dma_map(struct device *dev,
			struct skcipher_request *req)
{
	int error;

	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	return -ENOMEM;
}
void chcr_cipher_dma_unmap(struct device *dev,
			   struct skcipher_request *req)
{
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
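/* Worked example of the length encoding above: for msglen = 0x1234 and
 * csize = 3, cpu_to_be32() yields the bytes 00 00 12 34 and the memcpy
 * writes the low-order three of them, leaving 00 12 34 in the length
 * field: a big-endian value right-aligned in a csize-byte field, as the
 * CCM spec requires.
 */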
static int generate_b0(struct aead_request *req, u8 *ivptr,
		       unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, ivptr, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);

	return rc;
}
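/* B0 flags byte per RFC 3610: bit 6 is Adata, bits 5-3 carry
 * M' = (authsize - 2) / 2 (hence the multiply-by-8 shift above) and
 * bits 2-0 carry L' = L - 1, which arrives in iv[0] and is already
 * present in the copied IV. The message length then occupies the last
 * L bytes of the 16-byte block.
 */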
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}
static int ccm_format_packet(struct aead_request *req,
			     u8 *ivptr,
			     unsigned int sub_type,
			     unsigned short op_type,
			     unsigned int assoclen)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		ivptr[0] = 3;
		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
		memcpy(ivptr + 4, req->iv, 8);
		memset(ivptr + 12, 0, 4);
	} else {
		memcpy(ivptr, req->iv, 16);
	}
	if (assoclen)
		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);

	rc = generate_b0(req, ivptr, op_type);
	/* zero the ctr value */
	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
	return rc;
}
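/* For RFC 4309 the counter block built above is flags | 3-byte salt |
 * 8-byte IV | 4-byte counter, with iv[0] = 3 encoding L' for L = 4
 * length octets. The final memset clears the trailing counter octets so
 * the hardware starts the CTR portion of CCM from zero.
 */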
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
	unsigned int ccm_xtra;
	unsigned int tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(req->assoclen + IV + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
	sec_cpl->pldlen =
		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will be a b0 always. So AAD start will be 1 always */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					1 + IV, IV + assoclen + ccm_xtra,
					req->assoclen + IV + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, IV >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					0, dst_size);
}
static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	return 0;
}
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
			+ (reqctx->op ? -authsize : authsize),
			CHCR_DST_SG_SIZE, 0);
	dnents += MIN_CCM_SG; // For IV and B0
	dst_size = get_space_for_phys_dsgl(dnents);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	snents += MIN_CCM_SG; //For B0
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);

	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
	if (error)
		goto dstmap_fail;
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0, snents;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
				(reqctx->op ? -authsize : authsize),
				CHCR_DST_SG_SIZE, 0);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dnents += MIN_GCM_SG; // For IV
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
		(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {

		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	//Offset of tag from end
	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
						rx_channel_id, 2, 1);
	chcr_req->sec_cpl.pldlen =
		htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 + IV : 0,
					assoclen ? IV + assoclen : 0,
					req->assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
						temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	/* prepare a 16 byte iv */
	/* S   A   L  T |  IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(ivptr, aeadctx->salt, 4);
		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
	}
	put_unaligned_be32(0x01, &ivptr[12]);
	ulptx = (struct ulptx_sgl *)(ivptr + 16);

	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
	return skb;

err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				 sizeof(struct aead_request) +
				 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}
static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* SHA1 authsize in ipsec is 12 instead of 10, i.e. maxauthsize / 2 is
	 * not true for sha1. The authsize == 12 condition must therefore come
	 * before authsize == (maxauth >> 1).
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}
static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;
	struct crypto_aes_ctx aes;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
	memzero_explicit(&aes, sizeof(aes));

out:
	return ret;
}
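/* GHASH requires the hash subkey H = AES_K(0^128): the key schedule is
 * expanded with aes_expandkey(), a zeroed 16-byte block is encrypted in
 * place in gctx->ghash_h, and the expanded schedule is wiped afterwards
 * since only H travels to the hardware key context alongside the raw
 * AES key.
 */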
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains auth and cipher key both*/
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use the authkey to generate
	 * h(ipad) and h(opad), so the authkey is not needed again.
	 * authkeylen is the size of the hash digest.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {

		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest*/
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digest to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
						0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
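/* The authenc key context thus carries the AES key (or its reversed
 * decrypt form in dec_rrkey), followed in h_iopad by the H(K ^ ipad)
 * and H(K ^ opad) partial digests; the hardware finishes the inner and
 * outer HMAC hashes over the payload from those precomputed states.
 */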
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains auth and cipher key both*/
	struct crypto_authenc_keys keys;
	unsigned int subtype;
	int err = 0, key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct chcr_dev *cdev;

	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}

	if (chcr_inc_wrcount(cdev)) {
	/* Detach state for CHCR means lldi or padap is freed.
	 * We cannot increment fallback here.
	 */
		return chcr_aead_fallback(req, reqctx->op);
	}

	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   reqctx->txqidx) &&
	    (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		chcr_dec_wrcount(cdev);
		return -ENOSPC;
	}

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
		pr_err("RFC4106: Invalid value of assoclen %d\n",
		       req->assoclen);
		return -EINVAL;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);

	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
	}

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}
static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}
3827 static struct chcr_alg_template driver_algs
[] = {
3830 .type
= CRYPTO_ALG_TYPE_SKCIPHER
| CRYPTO_ALG_SUB_TYPE_CBC
,
3833 .base
.cra_name
= "cbc(aes)",
3834 .base
.cra_driver_name
= "cbc-aes-chcr",
3835 .base
.cra_blocksize
= AES_BLOCK_SIZE
,
3837 .init
= chcr_init_tfm
,
3838 .exit
= chcr_exit_tfm
,
3839 .min_keysize
= AES_MIN_KEY_SIZE
,
3840 .max_keysize
= AES_MAX_KEY_SIZE
,
3841 .ivsize
= AES_BLOCK_SIZE
,
3842 .setkey
= chcr_aes_cbc_setkey
,
3843 .encrypt
= chcr_aes_encrypt
,
3844 .decrypt
= chcr_aes_decrypt
,
3848 .type
= CRYPTO_ALG_TYPE_SKCIPHER
| CRYPTO_ALG_SUB_TYPE_XTS
,
3851 .base
.cra_name
= "xts(aes)",
3852 .base
.cra_driver_name
= "xts-aes-chcr",
3853 .base
.cra_blocksize
= AES_BLOCK_SIZE
,
3855 .init
= chcr_init_tfm
,
3856 .exit
= chcr_exit_tfm
,
3857 .min_keysize
= 2 * AES_MIN_KEY_SIZE
,
3858 .max_keysize
= 2 * AES_MAX_KEY_SIZE
,
3859 .ivsize
= AES_BLOCK_SIZE
,
3860 .setkey
= chcr_aes_xts_setkey
,
3861 .encrypt
= chcr_aes_encrypt
,
3862 .decrypt
= chcr_aes_decrypt
,
3866 .type
= CRYPTO_ALG_TYPE_SKCIPHER
| CRYPTO_ALG_SUB_TYPE_CTR
,
3869 .base
.cra_name
= "ctr(aes)",
3870 .base
.cra_driver_name
= "ctr-aes-chcr",
3871 .base
.cra_blocksize
= 1,
3873 .init
= chcr_init_tfm
,
3874 .exit
= chcr_exit_tfm
,
3875 .min_keysize
= AES_MIN_KEY_SIZE
,
3876 .max_keysize
= AES_MAX_KEY_SIZE
,
3877 .ivsize
= AES_BLOCK_SIZE
,
3878 .setkey
= chcr_aes_ctr_setkey
,
3879 .encrypt
= chcr_aes_encrypt
,
3880 .decrypt
= chcr_aes_decrypt
,
3884 .type
= CRYPTO_ALG_TYPE_SKCIPHER
|
3885 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686
,
3888 .base
.cra_name
= "rfc3686(ctr(aes))",
3889 .base
.cra_driver_name
= "rfc3686-ctr-aes-chcr",
3890 .base
.cra_blocksize
= 1,
3892 .init
= chcr_rfc3686_init
,
3893 .exit
= chcr_exit_tfm
,
3894 .min_keysize
= AES_MIN_KEY_SIZE
+ CTR_RFC3686_NONCE_SIZE
,
3895 .max_keysize
= AES_MAX_KEY_SIZE
+ CTR_RFC3686_NONCE_SIZE
,
3896 .ivsize
= CTR_RFC3686_IV_SIZE
,
3897 .setkey
= chcr_aes_rfc3686_setkey
,
3898 .encrypt
= chcr_aes_encrypt
,
3899 .decrypt
= chcr_aes_decrypt
,
3904 .type
= CRYPTO_ALG_TYPE_AHASH
,
3907 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3910 .cra_driver_name
= "sha1-chcr",
3911 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3916 .type
= CRYPTO_ALG_TYPE_AHASH
,
3919 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3921 .cra_name
= "sha256",
3922 .cra_driver_name
= "sha256-chcr",
3923 .cra_blocksize
= SHA256_BLOCK_SIZE
,
3928 .type
= CRYPTO_ALG_TYPE_AHASH
,
3931 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3933 .cra_name
= "sha224",
3934 .cra_driver_name
= "sha224-chcr",
3935 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3940 .type
= CRYPTO_ALG_TYPE_AHASH
,
3943 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
3945 .cra_name
= "sha384",
3946 .cra_driver_name
= "sha384-chcr",
3947 .cra_blocksize
= SHA384_BLOCK_SIZE
,
3952 .type
= CRYPTO_ALG_TYPE_AHASH
,
3955 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
3957 .cra_name
= "sha512",
3958 .cra_driver_name
= "sha512-chcr",
3959 .cra_blocksize
= SHA512_BLOCK_SIZE
,
3965 .type
= CRYPTO_ALG_TYPE_HMAC
,
3968 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3970 .cra_name
= "hmac(sha1)",
3971 .cra_driver_name
= "hmac-sha1-chcr",
3972 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3977 .type
= CRYPTO_ALG_TYPE_HMAC
,
3980 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3982 .cra_name
= "hmac(sha224)",
3983 .cra_driver_name
= "hmac-sha224-chcr",
3984 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3989 .type
= CRYPTO_ALG_TYPE_HMAC
,
3992 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3994 .cra_name
= "hmac(sha256)",
3995 .cra_driver_name
= "hmac-sha256-chcr",
3996 .cra_blocksize
= SHA256_BLOCK_SIZE
,
4001 .type
= CRYPTO_ALG_TYPE_HMAC
,
4004 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
4006 .cra_name
= "hmac(sha384)",
4007 .cra_driver_name
= "hmac-sha384-chcr",
4008 .cra_blocksize
= SHA384_BLOCK_SIZE
,
4013 .type
= CRYPTO_ALG_TYPE_HMAC
,
4016 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
4018 .cra_name
= "hmac(sha512)",
4019 .cra_driver_name
= "hmac-sha512-chcr",
4020 .cra_blocksize
= SHA512_BLOCK_SIZE
,
4024 /* Add AEAD Algorithms */
4026 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_GCM
,
4030 .cra_name
= "gcm(aes)",
4031 .cra_driver_name
= "gcm-aes-chcr",
4033 .cra_priority
= CHCR_AEAD_PRIORITY
,
4034 .cra_ctxsize
= sizeof(struct chcr_context
) +
4035 sizeof(struct chcr_aead_ctx
) +
4036 sizeof(struct chcr_gcm_ctx
),
4038 .ivsize
= GCM_AES_IV_SIZE
,
4039 .maxauthsize
= GHASH_DIGEST_SIZE
,
4040 .setkey
= chcr_gcm_setkey
,
4041 .setauthsize
= chcr_gcm_setauthsize
,
4045 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
,
4049 .cra_name
= "rfc4106(gcm(aes))",
4050 .cra_driver_name
= "rfc4106-gcm-aes-chcr",
4052 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
4053 .cra_ctxsize
= sizeof(struct chcr_context
) +
4054 sizeof(struct chcr_aead_ctx
) +
4055 sizeof(struct chcr_gcm_ctx
),
4058 .ivsize
= GCM_RFC4106_IV_SIZE
,
4059 .maxauthsize
= GHASH_DIGEST_SIZE
,
4060 .setkey
= chcr_gcm_setkey
,
4061 .setauthsize
= chcr_4106_4309_setauthsize
,
4065 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_CCM
,
4069 .cra_name
= "ccm(aes)",
4070 .cra_driver_name
= "ccm-aes-chcr",
4072 .cra_priority
= CHCR_AEAD_PRIORITY
,
4073 .cra_ctxsize
= sizeof(struct chcr_context
) +
4074 sizeof(struct chcr_aead_ctx
),
4077 .ivsize
= AES_BLOCK_SIZE
,
4078 .maxauthsize
= GHASH_DIGEST_SIZE
,
4079 .setkey
= chcr_aead_ccm_setkey
,
4080 .setauthsize
= chcr_ccm_setauthsize
,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
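
/*
 * Note: the AEAD entries above deliberately leave .base.cra_module,
 * .base.cra_flags and the encrypt/decrypt/init/exit callbacks unset;
 * chcr_register_alg() below fills those in for every table entry
 * immediately before handing it to the crypto API.
 */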

/*
 *	chcr_unregister_alg - Deregister crypto algorithms with
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_skcipher(
						&driver_algs[i].alg.skcipher);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
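
/*
 * Because every case above checks is_registered before unregistering,
 * chcr_unregister_alg() is safe to call on a partially registered
 * table; the register_err path in chcr_register_alg() relies on this
 * to unwind after a failed registration.
 */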

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
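
/*
 * Context sizing: every ahash transform carries a chcr_context, and
 * keyed (HMAC) transforms additionally reserve a hmac_ctx for the
 * per-key state derived at setkey time. SZ_AHASH_REQ_CTX is used as
 * halg.statesize below, so the full request context can round-trip
 * through crypto_ahash_export()/crypto_ahash_import().
 */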

/*
 *	chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			driver_algs[i].alg.skcipher.base.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
			driver_algs[i].alg.skcipher.base.cra_flags =
				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.skcipher.base.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		}
		driver_algs[i].is_registered = 1;
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
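
/*
 * Illustrative only (not part of this driver): once the algorithms are
 * registered, any kernel consumer can bind to one of the AEADs above
 * through the generic crypto API. A minimal sketch:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_aead(tfm);
 *
 * The "-chcr" implementation wins the lookup whenever
 * CHCR_AEAD_PRIORITY outranks the other registered providers of the
 * same cra_name.
 */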

/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After this
 *	the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all the crypto algorithms with kernel.
 *	This should be called once when the last device goes down. After this
 *	the kernel will not call the driver API for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}