/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}
static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);

	return err;
}
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord, SubWord, XOR with the round constant */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}
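/*
 * Note on get_aes_decrypt_key(): decryption on the engine starts from the
 * final round keys, so the routine runs the standard AES key expansion in an
 * Nk-word ring buffer and emits only the last Nk schedule words (reordered
 * and converted to big endian) into dec_key. For AES-128 those are words
 * w[40..43] of the 44-word schedule; the result lands in ablkctx->rrkey.
 */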
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}
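/*
 * Note: chcr_change_order() converts an exported partial hash state
 * (host-endian u32/u64 words) into the big-endian word order the crypto
 * engine expects in the key context. SHA-512/384 state is 64-bit words;
 * all other supported digests use 32-bit words.
 */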
static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}
static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents += 1;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}
static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_sync_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
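/*
 * Note: chcr_cipher_fallback() runs the request synchronously through the
 * software skcipher allocated at tfm init time. It is used when the
 * hardware cannot make progress, e.g. when the WR space calculation yields
 * zero bytes or a CTR counter would wrap mid-request.
 */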
static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		break;
	}
	return ret;
}
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	unsigned int tx_channel_id, rx_channel_id;
	unsigned int txqidx = 0, rxqidx = 0;
	unsigned int qid, fid;

	get_qidxs(req, &txqidx, &rxqidx);
	qid = u_ctx->lldi.rxq_ids[rxqidx];
	fid = u_ctx->lldi.rxq_ids[0];
	tx_channel_id = txqidx / ctx->txq_perchan;
	rx_channel_id = rxqidx / ctx->rxq_perchan;

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
							    !!lcb, txqidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));
	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@ctx: crypto driver context of the request.
 *	@qid: ingress qid where response of this WR should be received.
 *	@op_type: encryption or decryption
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							ablkctx->ciph_mode,
							0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}
static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; // Number of blocks that can be processed without overflow
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_aes_ctx aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
static int chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/*Updated before sending last WR*/
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need a separate function for the final IV because in rfc3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant for subsequent update requests.
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/*Already updated for Decrypt*/
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}
static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/*CTR mode counter overflow*/
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->cryptlen,
					   req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
	return err;
}
static int process_cipher(struct skcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;

	reqctx->processed = 0;
	reqctx->partial_req = 0;
	if (!req->iv)
		goto error;
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					       AES_MIN_KEY_SIZE +
					       sizeof(struct cpl_rx_phys_dsgl) +
					       /*Min dsgl size*/
					       32))) {
		/* Can be sent as Imm*/
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->iv, bytes);
	}
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

	} else {
		memcpy(reqctx->iv, req->iv, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->cryptlen,
					   reqctx->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;
	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}
static int chcr_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		err = -ENOSPC;
		goto error;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		reqctx->partial_req = 1;
		wait_for_completion(&ctx->cbc_aes_aio_done);
	}
	return -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}
static int chcr_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
		return -ENOSPC;
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	int txq_perchan, ntxq;
	int err = 0, rxq_perchan;

	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		ctx->ntxq = ntxq;
		ctx->nrxq = u_ctx->lldi.nrxq;
		ctx->rxq_perchan = rxq_perchan;
		ctx->txq_perchan = txq_perchan;
	}
out:
	return err;
}
static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
							CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	init_completion(&ctx->cbc_aes_aio_done);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));

	return chcr_device_init(ctx);
}
static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
	 * cannot be used as fallback in chcr_handle_cipher_response
	 */
	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
							CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
	return chcr_device_init(ctx);
}
static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_sync_skcipher(ablkctx->sw_cipher);
}
static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}
static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}
/**
 *	create_hash_wr - Create hash work request
 *	@req - Cipher req base
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;
	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);

	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
						     param->alg_prm.mk_size, 0,
						     param->opad_needed,
						     ((param->kctx_len +
						       sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to max wr size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* Detach state for CHCR means lldi or padap is freed. Increasing
	 * inflight count for dev guarantees that lldi and padap is valid
	 */
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	int error = -EINVAL;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	chcr_init_hctx_per_wr(req_ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
			       params.sg_len;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;
	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(rtfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_dev *dev = h_ctx(tfm)->dev;

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
}
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_SKCIPHER:
		chcr_handle_cipher_resp(skcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(req_ctx);
	return 0;
}
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* use the key to calculate the ipad and opad. ipad will be sent with
	 * the first request's data. opad will be sent with the final hash
	 * result. ipad goes in hmacctx->ipad and opad in hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
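
/*
 * Illustrative sketch only (not wired into the driver): the ipad/opad
 * blocks precomputed above are the standard HMAC padding of RFC 2104,
 * i.e. the zero-padded key XORed with repeated 0x36/0x5c bytes.
 * IPAD_DATA/OPAD_DATA are assumed here to be those bytes replicated
 * across a 32-bit word. A byte-wise equivalent, assuming keylen <= bs
 * (longer keys are first hashed down, as in chcr_ahash_setkey()):
 */
static void __maybe_unused hmac_pads_sketch(const u8 *key, unsigned int keylen,
					    unsigned int bs, u8 *ipad, u8 *opad)
{
	unsigned int i;

	memset(ipad, 0, bs);		/* zero-pad the key to one block */
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, bs);
	for (i = 0; i < bs; i++) {
		ipad[i] ^= 0x36;	/* inner pad byte */
		opad[i] ^= 0x5c;	/* outer pad byte */
	}
	/* the partial hashes of these two blocks become the HMAC
	 * inner/outer state loaded into the hardware key context.
	 */
}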
static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	ablkctx->key_ctx_hdr =
		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				 CHCR_KEYCTX_NO_KEY, 1,
				 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
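
/*
 * Note on the size selection above (an inference from the key handling,
 * not a hardware statement): an xts(aes) key is double length, two AES
 * keys concatenated, so a key_len of AES_KEYSIZE_256 (32 bytes) programs
 * two AES-128 keys. That appears to be why key_len == AES_KEYSIZE_256
 * selects CHCR_KEYCTX_CIPHER_KEY_SIZE_128 rather than the reverse.
 */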
static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);

	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}
inline void chcr_aead_common_exit(struct aead_request *req)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
}

static int chcr_aead_common_init(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;

	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;
	if (reqctx->op && req->cryptlen < authsize)
		goto err;
	if (reqctx->b0_len)
		reqctx->scratch_pad = reqctx->iv + IV;
	else
		reqctx->scratch_pad = NULL;

	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				  reqctx->op);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	return 0;
err:
	return error;
}
static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
				   int aadmax, int wrlen,
				   unsigned short op_type)
{
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
	    dst_nents > MAX_DSGL_ENT ||
	    (req->assoclen > aadmax) ||
	    (wrlen > SGE_MAX_WR_LEN))
		return 1;
	return 0;
}

static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
		crypto_aead_encrypt(subreq);
}
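
/*
 * Summary of the two helpers above: a request falls back to the software
 * implementation (aeadctx->sw_cipher) whenever it cannot be expressed as
 * a single work request: zero-length payload, more destination SG entries
 * than one DSGL can carry (MAX_DSGL_ENT), AAD beyond what the mode allows,
 * or a work request that would exceed SGE_MAX_WR_LEN. The fallback reuses
 * the request context as a sub-request aimed at the software tfm.
 */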
static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
	unsigned int kctx_len = 0, dnents, snents;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	if (req->cryptlen == 0)
		return NULL;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		null = 1;
	}
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
	dnents += MIN_AUTH_SG; // For IV
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
			: (sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * Input order is AAD,IV and Payload. where IV should be included as
	 * the part of authdata. All other fields should be filled according
	 * to the hardware spec
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					null ? 0 : 1 + IV,
					null ? 0 : IV + req->assoclen,
					req->assoclen + IV + 1,
					(temp & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					temp & 0xF,
					null ? 0 : req->assoclen + IV + 1,
					temp, temp);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
	else
		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
					temp,
					actx->auth_mode, aeadctx->hmac_ctrl,
					IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					 0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (reqctx->op == CHCR_ENCRYPT_OP ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);
		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(ivptr, req->iv, IV);
	}
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
err:
	chcr_aead_common_exit(req);

	return ERR_PTR(error);
}
int chcr_aead_dma_map(struct device *dev,
		      struct aead_request *req,
		      unsigned short op_type)
{
	int error;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
				-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return 0;
	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, reqctx->iv_dma))
		return -ENOMEM;
	if (reqctx->b0_len)
		reqctx->b0_dma = reqctx->iv_dma + IV;
	else
		reqctx->b0_dma = 0;
	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src,
				   sg_nents_for_len(req->src, dst_size),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
	return -ENOMEM;
}
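
/*
 * Mapping direction note: when the request operates in place
 * (req->src == req->dst) the single scatterlist is mapped once with
 * DMA_BIDIRECTIONAL; otherwise the source is mapped DMA_TO_DEVICE and
 * the destination DMA_FROM_DEVICE, and the source mapping is undone if
 * mapping the destination fails. chcr_aead_dma_unmap() below mirrors
 * this exactly.
 */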
void chcr_aead_dma_unmap(struct device *dev,
			 struct aead_request *req,
			 unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
				-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return;

	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src,
			     sg_nents_for_len(req->src, dst_size),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}
void chcr_add_aead_src_ent(struct aead_request *req,
			   struct ulptx_sgl *ulptx)
{
	struct ulptx_walk ulp_walk;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	if (reqctx->imm) {
		u8 *buf = (u8 *)ulptx;

		if (reqctx->b0_len) {
			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
			buf += reqctx->b0_len;
		}
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, req->cryptlen + req->assoclen, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (reqctx->b0_len)
			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
					    reqctx->b0_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
				  req->assoclen, 0);
		ulptx_walk_end(&ulp_walk);
	}
}
void chcr_add_aead_dst_ent(struct aead_request *req,
			   struct cpl_rx_phys_dsgl *phys_cpl,
			   unsigned short qid)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct dsgl_walk dsgl_walk;
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct chcr_context *ctx = a_ctx(tfm);
	u32 temp;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
	temp = req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}
void chcr_add_cipher_src_ent(struct skcipher_request *req,
			     void *ulptx,
			     struct cipher_wr_param *wrparam)
{
	struct ulptx_walk ulp_walk;
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	u8 *buf = ulptx;

	memcpy(buf, reqctx->iv, IV);
	buf += IV;
	if (reqctx->imm) {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, wrparam->bytes, reqctx->processed);
	} else {
		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
				  reqctx->src_ofst);
		reqctx->srcsg = ulp_walk.last_sg;
		reqctx->src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}
void chcr_add_cipher_dst_ent(struct skcipher_request *req,
			     struct cpl_rx_phys_dsgl *phys_cpl,
			     struct cipher_wr_param *wrparam,
			     unsigned short qid)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct dsgl_walk dsgl_walk;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
	reqctx->dstsg = dsgl_walk.last_sg;
	reqctx->dst_ofst = dsgl_walk.last_sg_len;
	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}
void chcr_add_hash_src_ent(struct ahash_request *req,
			   struct ulptx_sgl *ulptx,
			   struct hash_wr_param *param)
{
	struct ulptx_walk ulp_walk;
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);

	if (reqctx->hctx_wr.imm) {
		u8 *buf = (u8 *)ulptx;

		if (param->bfr_len) {
			memcpy(buf, reqctx->reqbfr, param->bfr_len);
			buf += param->bfr_len;
		}

		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
				   sg_nents(reqctx->hctx_wr.srcsg), buf,
				   param->sg_len, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (param->bfr_len)
			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
					    reqctx->hctx_wr.dma_addr);
		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
				  param->sg_len, reqctx->hctx_wr.src_ofst);
		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}
int chcr_hash_dma_map(struct device *dev,
		      struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	int error = 0;

	if (!req->nbytes)
		return 0;
	error = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_TO_DEVICE);
	if (!error)
		return -ENOMEM;
	req_ctx->hctx_wr.is_sg_map = 1;
	return 0;
}

void chcr_hash_dma_unmap(struct device *dev,
			 struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return;

	dma_unmap_sg(dev, req->src, sg_nents(req->src),
		     DMA_TO_DEVICE);
	req_ctx->hctx_wr.is_sg_map = 0;
}
int chcr_cipher_dma_map(struct device *dev,
			struct skcipher_request *req)
{
	int error;

	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	return -ENOMEM;
}

void chcr_cipher_dma_unmap(struct device *dev,
			   struct skcipher_request *req)
{
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
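
/*
 * Worked example (illustrative only): set_msg_len() is handed the start
 * of the l-byte CCM length field, zeroes it, then writes the payload
 * length big-endian into its trailing bytes. With L' = iv[0] = 3 the
 * field is l = 4 octets wide and occupies B0 bytes 12..15, so a
 * 4096-byte payload leaves those bytes as 00 00 10 00.
 */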
static int generate_b0(struct aead_request *req, u8 *ivptr,
		       unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, ivptr, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);

	return rc;
}

static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}
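
/*
 * Illustrative sketch (not used by the driver) of the B0 flags octet
 * assembled by generate_b0() per RFC 3610: bit 6 is Adata, bits 3-5
 * encode (M - 2)/2, and bits 0-2 hold L - 1, which already sits in
 * iv[0] when B0 is seeded from the IV. E.g. authsize M = 8 with AAD
 * present and L' = 3 gives 0x40 | 0x18 | 0x03 = 0x5b.
 */
static u8 __maybe_unused ccm_b0_flags_sketch(unsigned int authsize,
					     bool have_aad, u8 l_prime)
{
	u8 flags = l_prime & 0x7;		/* L - 1, from iv[0] */

	flags |= 8 * ((authsize - 2) / 2);	/* (M - 2)/2 in bits 3-5 */
	if (have_aad)
		flags |= 64;			/* Adata bit */
	return flags;
}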
static int ccm_format_packet(struct aead_request *req,
			     u8 *ivptr,
			     unsigned int sub_type,
			     unsigned short op_type,
			     unsigned int assoclen)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		ivptr[0] = 3;
		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
		memcpy(ivptr + 4, req->iv, 8);
		memset(ivptr + 12, 0, 4);
	} else {
		memcpy(ivptr, req->iv, 16);
	}
	if (assoclen)
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
				htons(assoclen);

	rc = generate_b0(req, ivptr, op_type);
	/* zero the ctr value */
	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
	return rc;
}
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
	unsigned int ccm_xtra;
	unsigned char tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(req->assoclen + IV + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
	sec_cpl->pldlen =
		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will always be a B0 block, so AAD start is always 1 */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					1 + IV,	IV + assoclen + ccm_xtra,
					req->assoclen + IV + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, IV >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					0, dst_size);
}
static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	return 0;
}
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
			+ (reqctx->op ? -authsize : authsize),
			CHCR_DST_SG_SIZE, 0);
	dnents += MIN_CCM_SG; // For IV and B0
	dst_size = get_space_for_phys_dsgl(dnents);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	snents += MIN_CCM_SG; //For B0
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);

	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
	if (error)
		goto dstmap_fail;
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0, snents;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
				(reqctx->op ? -authsize : authsize),
				CHCR_DST_SG_SIZE, 0);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dnents += MIN_GCM_SG; // For IV
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
		(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {

		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	//Offset of tag from end
	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
						rx_channel_id, 2, 1);
	chcr_req->sec_cpl.pldlen =
		htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 + IV : 0,
					assoclen ? IV + assoclen : 0,
					req->assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
						temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	/* prepare a 16 byte iv */
	/* S   A   L  T |  IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(ivptr, aeadctx->salt, 4);
		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
	}
	*((unsigned int *)(ivptr + 12)) = htonl(0x01);

	ulptx = (struct ulptx_sgl *)(ivptr + 16);

	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
	reqctx->skb = skb;
	return skb;

err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
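
/*
 * Illustrative sketch (not wired into the driver) of the 16-byte counter
 * block assembled above: for rfc4106 the 4-byte salt taken from the key
 * precedes the 8-byte per-request IV; plain gcm(aes) uses the caller's
 * 12-byte IV directly. Either way the final word is a big-endian block
 * counter primed to 1, matching J0 for a 96-bit GCM IV.
 */
static void __maybe_unused gcm_j0_sketch(const u8 *salt, const u8 *iv,
					 bool rfc4106, u8 *j0)
{
	if (rfc4106) {
		memcpy(j0, salt, 4);		/* salt from key material */
		memcpy(j0 + 4, iv, 8);		/* per-request IV */
	} else {
		memcpy(j0, iv, 12);		/* caller-supplied IV */
	}
	*(__be32 *)(j0 + 12) = cpu_to_be32(1);	/* initial counter */
}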
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				 sizeof(struct aead_request) +
				 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
	 * does not hold for SHA1, so the authsize == 12 case must be
	 * checked before authsize == (maxauth >> 1).
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
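
/*
 * Summary of the setauthsize handlers above: the hardware produces a
 * fixed-width digest, so the requested ICV length is expressed through
 * hmac_ctrl (NOP, PL1/PL2/PL3, DIV2, IPSEC_96BIT, TRUNC_RFC4366 or
 * NO_TRUNC). Lengths the unit cannot truncate to directly are verified
 * in software instead (mayverify = VERIFY_SW).
 */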
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}
static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;
	struct crypto_aes_ctx aes;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
	memzero_explicit(&aes, sizeof(aes));

out:
	return ret;
}
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains auth and cipher key both*/
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use authkey to generate h(ipad)
	 * and h(opad), so authkey is not needed again. authkeylen is at
	 * most the hash digest size.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {

		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest*/
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digest to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
						0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct crypto_authenc_keys keys;
	int err;
	/* it contains auth and cipher key both*/
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct chcr_dev *cdev;

	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}

	if (chcr_inc_wrcount(cdev)) {
	/* Detach state for CHCR means lldi or padap is freed.
	 * We cannot increment fallback here.
	 */
		return chcr_aead_fallback(req, reqctx->op);
	}

	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   reqctx->txqidx) &&
	    (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		chcr_dec_wrcount(cdev);
		return -ENOSPC;
	}

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
		pr_err("RFC4106: Invalid value of assoclen %d\n",
		       req->assoclen);
		return -EINVAL;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);

	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
	}

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
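
/*
 * chcr_aead_op() is the common submission path: take a reference on the
 * device (chcr_inc_wrcount), honour queue backpressure unless the caller
 * allows backlogging, build the mode-specific work request via
 * create_wr_fn, and hand the skb to the LLD. Completion arrives
 * asynchronously through chcr_handle_resp().
 */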
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}
static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}
3792 static struct chcr_alg_template driver_algs
[] = {
3795 .type
= CRYPTO_ALG_TYPE_SKCIPHER
| CRYPTO_ALG_SUB_TYPE_CBC
,
3798 .base
.cra_name
= "cbc(aes)",
3799 .base
.cra_driver_name
= "cbc-aes-chcr",
3800 .base
.cra_blocksize
= AES_BLOCK_SIZE
,
3802 .init
= chcr_init_tfm
,
3803 .exit
= chcr_exit_tfm
,
3804 .min_keysize
= AES_MIN_KEY_SIZE
,
3805 .max_keysize
= AES_MAX_KEY_SIZE
,
3806 .ivsize
= AES_BLOCK_SIZE
,
3807 .setkey
= chcr_aes_cbc_setkey
,
3808 .encrypt
= chcr_aes_encrypt
,
3809 .decrypt
= chcr_aes_decrypt
,
3813 .type
= CRYPTO_ALG_TYPE_SKCIPHER
| CRYPTO_ALG_SUB_TYPE_XTS
,
3816 .base
.cra_name
= "xts(aes)",
3817 .base
.cra_driver_name
= "xts-aes-chcr",
3818 .base
.cra_blocksize
= AES_BLOCK_SIZE
,
3820 .init
= chcr_init_tfm
,
3821 .exit
= chcr_exit_tfm
,
3822 .min_keysize
= 2 * AES_MIN_KEY_SIZE
,
3823 .max_keysize
= 2 * AES_MAX_KEY_SIZE
,
3824 .ivsize
= AES_BLOCK_SIZE
,
3825 .setkey
= chcr_aes_xts_setkey
,
3826 .encrypt
= chcr_aes_encrypt
,
3827 .decrypt
= chcr_aes_decrypt
,
3831 .type
= CRYPTO_ALG_TYPE_SKCIPHER
| CRYPTO_ALG_SUB_TYPE_CTR
,
3834 .base
.cra_name
= "ctr(aes)",
3835 .base
.cra_driver_name
= "ctr-aes-chcr",
3836 .base
.cra_blocksize
= 1,
3838 .init
= chcr_init_tfm
,
3839 .exit
= chcr_exit_tfm
,
3840 .min_keysize
= AES_MIN_KEY_SIZE
,
3841 .max_keysize
= AES_MAX_KEY_SIZE
,
3842 .ivsize
= AES_BLOCK_SIZE
,
3843 .setkey
= chcr_aes_ctr_setkey
,
3844 .encrypt
= chcr_aes_encrypt
,
3845 .decrypt
= chcr_aes_decrypt
,
3849 .type
= CRYPTO_ALG_TYPE_SKCIPHER
|
3850 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686
,
3853 .base
.cra_name
= "rfc3686(ctr(aes))",
3854 .base
.cra_driver_name
= "rfc3686-ctr-aes-chcr",
3855 .base
.cra_blocksize
= 1,
3857 .init
= chcr_rfc3686_init
,
3858 .exit
= chcr_exit_tfm
,
3859 .min_keysize
= AES_MIN_KEY_SIZE
+ CTR_RFC3686_NONCE_SIZE
,
3860 .max_keysize
= AES_MAX_KEY_SIZE
+ CTR_RFC3686_NONCE_SIZE
,
3861 .ivsize
= CTR_RFC3686_IV_SIZE
,
3862 .setkey
= chcr_aes_rfc3686_setkey
,
3863 .encrypt
= chcr_aes_encrypt
,
3864 .decrypt
= chcr_aes_decrypt
,
3869 .type
= CRYPTO_ALG_TYPE_AHASH
,
3872 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3875 .cra_driver_name
= "sha1-chcr",
3876 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3881 .type
= CRYPTO_ALG_TYPE_AHASH
,
3884 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3886 .cra_name
= "sha256",
3887 .cra_driver_name
= "sha256-chcr",
3888 .cra_blocksize
= SHA256_BLOCK_SIZE
,
3893 .type
= CRYPTO_ALG_TYPE_AHASH
,
3896 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3898 .cra_name
= "sha224",
3899 .cra_driver_name
= "sha224-chcr",
3900 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3905 .type
= CRYPTO_ALG_TYPE_AHASH
,
3908 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
3910 .cra_name
= "sha384",
3911 .cra_driver_name
= "sha384-chcr",
3912 .cra_blocksize
= SHA384_BLOCK_SIZE
,
3917 .type
= CRYPTO_ALG_TYPE_AHASH
,
3920 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
3922 .cra_name
= "sha512",
3923 .cra_driver_name
= "sha512-chcr",
3924 .cra_blocksize
= SHA512_BLOCK_SIZE
,
3930 .type
= CRYPTO_ALG_TYPE_HMAC
,
3933 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3935 .cra_name
= "hmac(sha1)",
3936 .cra_driver_name
= "hmac-sha1-chcr",
3937 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3942 .type
= CRYPTO_ALG_TYPE_HMAC
,
3945 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3947 .cra_name
= "hmac(sha224)",
3948 .cra_driver_name
= "hmac-sha224-chcr",
3949 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3954 .type
= CRYPTO_ALG_TYPE_HMAC
,
3957 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3959 .cra_name
= "hmac(sha256)",
3960 .cra_driver_name
= "hmac-sha256-chcr",
3961 .cra_blocksize
= SHA256_BLOCK_SIZE
,
3966 .type
= CRYPTO_ALG_TYPE_HMAC
,
3969 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
3971 .cra_name
= "hmac(sha384)",
3972 .cra_driver_name
= "hmac-sha384-chcr",
3973 .cra_blocksize
= SHA384_BLOCK_SIZE
,
3978 .type
= CRYPTO_ALG_TYPE_HMAC
,
3981 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
3983 .cra_name
= "hmac(sha512)",
3984 .cra_driver_name
= "hmac-sha512-chcr",
3985 .cra_blocksize
= SHA512_BLOCK_SIZE
,
3989 /* Add AEAD Algorithms */
3991 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_GCM
,
3995 .cra_name
= "gcm(aes)",
3996 .cra_driver_name
= "gcm-aes-chcr",
3998 .cra_priority
= CHCR_AEAD_PRIORITY
,
3999 .cra_ctxsize
= sizeof(struct chcr_context
) +
4000 sizeof(struct chcr_aead_ctx
) +
4001 sizeof(struct chcr_gcm_ctx
),
4003 .ivsize
= GCM_AES_IV_SIZE
,
4004 .maxauthsize
= GHASH_DIGEST_SIZE
,
4005 .setkey
= chcr_gcm_setkey
,
4006 .setauthsize
= chcr_gcm_setauthsize
,
4010 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
,
4014 .cra_name
= "rfc4106(gcm(aes))",
4015 .cra_driver_name
= "rfc4106-gcm-aes-chcr",
4017 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
4018 .cra_ctxsize
= sizeof(struct chcr_context
) +
4019 sizeof(struct chcr_aead_ctx
) +
4020 sizeof(struct chcr_gcm_ctx
),
4023 .ivsize
= GCM_RFC4106_IV_SIZE
,
4024 .maxauthsize
= GHASH_DIGEST_SIZE
,
4025 .setkey
= chcr_gcm_setkey
,
4026 .setauthsize
= chcr_4106_4309_setauthsize
,
4030 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_CCM
,
4034 .cra_name
= "ccm(aes)",
4035 .cra_driver_name
= "ccm-aes-chcr",
4037 .cra_priority
= CHCR_AEAD_PRIORITY
,
4038 .cra_ctxsize
= sizeof(struct chcr_context
) +
4039 sizeof(struct chcr_aead_ctx
),
4042 .ivsize
= AES_BLOCK_SIZE
,
4043 .maxauthsize
= GHASH_DIGEST_SIZE
,
4044 .setkey
= chcr_aead_ccm_setkey
,
4045 .setauthsize
= chcr_ccm_setauthsize
,
4049 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
,
4053 .cra_name
= "rfc4309(ccm(aes))",
4054 .cra_driver_name
= "rfc4309-ccm-aes-chcr",
4056 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
4057 .cra_ctxsize
= sizeof(struct chcr_context
) +
4058 sizeof(struct chcr_aead_ctx
),
4062 .maxauthsize
= GHASH_DIGEST_SIZE
,
4063 .setkey
= chcr_aead_rfc4309_setkey
,
4064 .setauthsize
= chcr_4106_4309_setauthsize
,
4068 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4072 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
4074 "authenc-hmac-sha1-cbc-aes-chcr",
4075 .cra_blocksize
= AES_BLOCK_SIZE
,
4076 .cra_priority
= CHCR_AEAD_PRIORITY
,
4077 .cra_ctxsize
= sizeof(struct chcr_context
) +
4078 sizeof(struct chcr_aead_ctx
) +
4079 sizeof(struct chcr_authenc_ctx
),
4082 .ivsize
= AES_BLOCK_SIZE
,
4083 .maxauthsize
= SHA1_DIGEST_SIZE
,
4084 .setkey
= chcr_authenc_setkey
,
4085 .setauthsize
= chcr_authenc_setauthsize
,
4089 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4094 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
4096 "authenc-hmac-sha256-cbc-aes-chcr",
4097 .cra_blocksize
= AES_BLOCK_SIZE
,
4098 .cra_priority
= CHCR_AEAD_PRIORITY
,
4099 .cra_ctxsize
= sizeof(struct chcr_context
) +
4100 sizeof(struct chcr_aead_ctx
) +
4101 sizeof(struct chcr_authenc_ctx
),
4104 .ivsize
= AES_BLOCK_SIZE
,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
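
/*
 * Illustrative sketch (not part of the driver): once the table above is
 * registered, any kernel consumer can reach these implementations through
 * the generic AEAD API by cra_name; the crypto core prefers this driver
 * whenever CHCR_AEAD_PRIORITY beats the competing software priority.
 * Assumes a key blob already RTA-encoded per the authenc() convention:
 *
 *	struct crypto_aead *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 */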
/*
 *	chcr_unregister_alg - Deregister crypto algorithms with
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_skcipher(
						&driver_algs[i].alg.skcipher);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
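
/*
 * Note: the loop above clears is_registered unconditionally, so calling
 * chcr_unregister_alg() twice is harmless; only entries that were actually
 * registered are handed back to the crypto core.
 */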
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
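
/*
 * The sizes above feed cra_ctxsize/statesize for the ahash cases below:
 * plain digests need only the per-tfm struct chcr_context, HMAC transforms
 * additionally carry struct hmac_ctx (the precomputed ipad/opad state), and
 * SZ_AHASH_REQ_CTX sizes the request state saved by export()/import().
 */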
/*
 *	chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			driver_algs[i].alg.skcipher.base.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
			driver_algs[i].alg.skcipher.base.cra_flags =
				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.skcipher.base.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
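
/*
 * Design note: registration stops at the first failure and unwinds through
 * chcr_unregister_alg(), so a partially registered table is never left
 * behind. Because entries already marked is_registered are skipped at the
 * top of the loop, chcr_register_alg() is also safe to call again after a
 * transient failure.
 */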
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After this
 *	the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once when the last device goes down. After this
 *	the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
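
/*
 * A minimal sketch of the expected call pattern from the core driver; the
 * surrounding device-tracking logic here is illustrative, not the actual
 * chcr_core.c code:
 *
 *	if (atomic_inc_return(&dev_count) == 1)
 *		err = start_crypto();
 *	...
 *	if (atomic_dec_and_test(&dev_count))
 *		stop_crypto();
 *
 * start_crypto() can fail (e.g. -EEXIST on a driver-name collision), so the
 * caller should propagate the error rather than assume the algorithms are
 * available.
 */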