/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};
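/*
 * These two tables appear to map an entry count to the work-request bytes
 * consumed by a source ULPTX SGL (sgl_ent_len) and by a destination
 * PHYS_DSGL (dsgl_ent_len) holding that many entries; chcr_sg_ent_in_wr()
 * and chcr_hash_ent_in_wr() below index them with the running entry
 * counts to decide how much payload still fits in a single WR.
 */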
static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
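/*
 * These are the standard AES key-schedule round constants (rcon), stored
 * in the most significant byte of each word so that the
 * "temp ^= round_constant[i / nk]" step in get_aes_decrypt_key() below
 * lines up with its big-endian word handling.
 */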
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
		} else {
			skip_len = skip;
			skip = 0;
		}
		sg = sg_next(sg);
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}
static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);

	return err;
}
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}
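/*
 * The loop above is the standard forward AES key expansion; the final
 * copy then walks the last nk schedule words backwards, so dec_key ends
 * up holding the last round keys first, which is evidently the layout
 * the hardware wants for its decrypt (inverse cipher) key context.
 */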
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}

static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}
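/*
 * Exporting the shash state after hashing exactly one block dumps the
 * raw internal chaining variables, so result_hash receives a
 * precomputed HMAC inner/outer state rather than a finalized digest;
 * chcr_ahash_setkey() later loads that state into the key context.
 */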
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}
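/*
 * The exported state words are in host byte order, while the key context
 * is copied to the device verbatim; this swap puts the partial hash into
 * the big-endian layout the firmware presumably expects.
 */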
static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
		} else {
			skip_len = skip;
			skip = 0;
		}
		sg = sg_next(sg);
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
		} else {
			skip_len = skip;
			skip = 0;
		}
		sg = sg_next(sg);
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_sync_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
				!!lcb, ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}
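/*
 * Rough WR layout, a sketch inferred from the fields filled in here and
 * by the create_*_wr() callers:
 * FW_CRYPTO_LOOKASIDE_WR | ULPTX cmd | SC_IMM | CPL_TX_SEC_PDU |
 * key context | CPL_RX_PHYS_DSGL (dst) | ULPTX SGL or immediate data.
 * len16 covers the whole WR, while sc_len covers only the CPL, key
 * context and destination list portion.
 */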
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@req: cipher req.
 *	@ctx: crypto driver context of the request.
 *	@qid: ingress qid where response of this WR should be received.
 *	@op_type: encryption or decryption
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_blkcipher_req_ctx *reqctx =
		ablkcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(c_ctx(tfm)->dev);

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->info, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	int err = 0;

	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
				       cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}
static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		prev += add;
		*b = cpu_to_be32(prev);
		if (prev >= add)	/* no carry into the next word */
			break;
		add = 1;
	}
}
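/*
 * Example: with add == 1 and a counter block ending in 0xFFFFFFFF, the
 * low word wraps to 0 and the carry (add reset to 1) propagates into
 * the next more significant 32-bit word, i.e. a 128-bit big-endian
 * increment performed 32 bits at a time.
 */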
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* Number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
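/*
 * Example: if the last 32-bit word of the IV is 0xFFFFFFFE, then
 * c = ~0xFFFFFFFE + 1 = 2, so at most two AES blocks can be sent before
 * the 32-bit counter would wrap; the request is clipped there and the
 * remainder is handled after the IV has been adjusted.
 */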
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_cipher *cipher;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	cipher = ablkctx->aes_generic;
	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret)
		goto out;
	crypto_cipher_encrypt_one(cipher, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		crypto_cipher_decrypt_one(cipher, iv, iv);
out:
	return ret;
}
static int chcr_update_cipher_iv(struct ablkcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
					   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
							    AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/*Updated before sending last WR*/
			memcpy(iv, req->info, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes,
 * which remains constant for subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
						       AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/*Already updated for Decrypt*/
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	int bytes;

	if (err)
		goto unmap;
	if (req->nbytes == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/*CTR mode counter overflow*/
		bytes = req->nbytes - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   req->info,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
	return err;
}
static int process_cipher(struct ablkcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;

	reqctx->processed = 0;
	if (!req->info)
		goto error;
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes == 0) ||
	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					     AES_MIN_KEY_SIZE +
					     sizeof(struct cpl_rx_phys_dsgl) +
					/*Min dsgl size*/
					     32))) {
		/* Can be sent as Imm*/
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->nbytes,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->nbytes;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->nbytes;
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->info, bytes);
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

	} else {

		memcpy(reqctx->iv, req->info, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   reqctx->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err, isfull = 0;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -ENOSPC;
			goto error;
		}
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		goto error;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err, isfull = 0;

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	unsigned int id;
	int txq_perchan, txq_idx, ntxq;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_chan_id = ctx->dev->tx_channel_id;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		spin_unlock(&ctx->dev->lock_chcr_dev);
		rxq_idx = ctx->tx_chan_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->tx_chan_id * txq_perchan;
		txq_idx += id % txq_perchan;
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		/* Channel Id used by SGE to forward packet to Host.
		 * Same value should be used in cpl_fw6_pld RSS_CH field
		 * by FW. Driver programs PCI channel ID to be used in fw
		 * at the time of queue allocation with value "pi->tx_chan"
		 */
		ctx->pci_chan_id = txq_idx / txq_perchan;
	}
out:
	return err;
}
static int chcr_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}

	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
		/* To update tweak*/
		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
		if (IS_ERR(ablkctx->aes_generic)) {
			pr_err("failed to allocate aes cipher for tweak\n");
			return PTR_ERR(ablkctx->aes_generic);
		}
	} else
		ablkctx->aes_generic = NULL;

	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
	 * cannot be used as fallback in chcr_handle_cipher_response
	 */
	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_sync_skcipher(ablkctx->sw_cipher);
	if (ablkctx->aes_generic)
		crypto_free_cipher(ablkctx->aes_generic);
}
static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}
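/*
 * SHA-224 and SHA-384 are truncations of SHA-256 and SHA-512, so their
 * partial state is the full parent state; that is presumably why
 * result_size above is the parent digest size rather than the truncated
 * output size.
 */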
static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 *	create_hash_wr - Create hash work request
 *	@req - Cipher req base
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					param->alg_prm.mk_size, 0,
					param->opad_needed,
					((param->kctx_len +
					 sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to max wr size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* Detach state for CHCR means lldi or padap is freed. Increasing
	 * inflight count for dev guarantees that lldi and padap is valid
	 */
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			error = -ENOSPC;
			goto err;
		}
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
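/*
 * This builds the final Merkle-Damgard padding block by hand: a 0x80
 * byte, zeroes, then the message length in bits in the last 8 bytes
 * (offset 56 for 64-byte blocks, 120 for 128-byte blocks), letting the
 * hardware finish a zero-length tail in a single pass.
 */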
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	int error = -EINVAL;

	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	chcr_init_hctx_per_wr(req_ctx);
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			error = -ENOSPC;
			goto err;
		}
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
			       params.sg_len;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			error = -ENOSPC;
			goto err;
		}
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;

	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_dev *dev = h_ctx(tfm)->dev;

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);

		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
}
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		chcr_handle_cipher_resp(ablkcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(req_ctx);
	return 0;
}
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* use the key to calculate the ipad and opad. ipad will be sent with
	 * the first request's data. opad will be sent with the final hash
	 * result. ipad lives in hmacctx->ipad and opad in hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
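/*
 * IPAD_DATA/OPAD_DATA are apparently the usual HMAC pad constants (0x36
 * and 0x5c repeated across each 32-bit word), so the XOR loops above
 * produce K ^ ipad and K ^ opad in place before their one-block partial
 * hashes are precomputed and byte-swapped for the key context.
 */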
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	ablkctx->key_ctx_hdr =
		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				 CHCR_KEYCTX_NO_KEY, 1,
				 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);

	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}

inline void chcr_aead_common_exit(struct aead_request *req)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
}

static int chcr_aead_common_init(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;

	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;
	if (reqctx->op && req->cryptlen < authsize)
		goto err;
	if (reqctx->b0_len)
		reqctx->scratch_pad = reqctx->iv + IV;
	else
		reqctx->scratch_pad = NULL;

	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				  reqctx->op);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	return 0;
err:
	return error;
}
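/* The hardware path is only taken for requests it can express in one work
 * request: anything with a zero-length payload, more destination SGL
 * entries than MAX_DSGL_ENT, AAD beyond the adapter limit, or a WR longer
 * than SGE_MAX_WR_LEN is bounced to the software fallback cipher instead.
 */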
static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
				   int aadmax, int wrlen,
				   unsigned short op_type)
{
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
	    dst_nents > MAX_DSGL_ENT ||
	    (req->assoclen > aadmax) ||
	    (wrlen > SGE_MAX_WR_LEN))
		return 1;
	return 0;
}
static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
		crypto_aead_encrypt(subreq);
}
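/* create_authenc_wr() assembles the AEAD work request for the authenc
 * (cipher + HMAC) modes. The WR layout is, roughly: FW/ULP headers, then
 * struct cpl_tx_sec_pdu, then the key context (the cipher key, or the
 * reverse-round decrypt key for CBC decrypt, followed by the precomputed
 * h_iopad), then the destination phys DSGL, the 16-byte IV, and finally
 * the source data, either inline ("imm") or as a ULPTX SGL.
 */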
static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
	unsigned int kctx_len = 0, dnents, snents;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (req->cryptlen == 0)
		return NULL;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		null = 1;
	}
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
	dnents += MIN_AUTH_SG; // For IV
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
			: (sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * Input order is AAD, IV and Payload, where IV should be included as
	 * part of the authdata. All other fields should be filled according
	 * to the hardware spec.
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					null ? 0 : 1 + IV,
					null ? 0 : IV + req->assoclen,
					req->assoclen + IV + 1,
					(temp & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					temp & 0xF,
					null ? 0 : req->assoclen + IV + 1,
					temp, temp);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
	else
		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
					temp,
					actx->auth_mode, aeadctx->hmac_ctrl,
					IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					 0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (reqctx->op == CHCR_ENCRYPT_OP ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);
		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(ivptr, req->iv, IV);
	}
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
err:
	chcr_aead_common_exit(req);

	return ERR_PTR(error);
}
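/* DMA helpers: the IV (plus the CCM B0 block when present) lives in the
 * request context and gets a single bidirectional mapping; the src/dst
 * scatterlists are mapped bidirectionally when the request is in-place
 * (src == dst) and as TO_DEVICE/FROM_DEVICE otherwise.
 */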
int chcr_aead_dma_map(struct device *dev,
		      struct aead_request *req,
		      unsigned short op_type)
{
	int error = -EINVAL;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
				-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return 0;
	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, reqctx->iv_dma))
		return -ENOMEM;
	if (reqctx->b0_len)
		reqctx->b0_dma = reqctx->iv_dma + IV;
	else
		reqctx->b0_dma = 0;
	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
	return -ENOMEM;
}

void chcr_aead_dma_unmap(struct device *dev,
			 struct aead_request *req,
			 unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
					-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return;

	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}
void chcr_add_aead_src_ent(struct aead_request *req,
			   struct ulptx_sgl *ulptx)
{
	struct ulptx_walk ulp_walk;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	if (reqctx->imm) {
		u8 *buf = (u8 *)ulptx;

		if (reqctx->b0_len) {
			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
			buf += reqctx->b0_len;
		}
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, req->cryptlen + req->assoclen, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (reqctx->b0_len)
			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
					    reqctx->b0_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
				  req->assoclen, 0);
		ulptx_walk_end(&ulp_walk);
	}
}

void chcr_add_aead_dst_ent(struct aead_request *req,
			   struct cpl_rx_phys_dsgl *phys_cpl,
			   unsigned short qid)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct dsgl_walk dsgl_walk;
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct chcr_context *ctx = a_ctx(tfm);
	u32 temp;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
	temp = req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}

void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
			     void *ulptx,
			     struct cipher_wr_param *wrparam)
{
	struct ulptx_walk ulp_walk;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	u8 *buf = ulptx;

	memcpy(buf, reqctx->iv, IV);
	buf += IV;
	if (reqctx->imm) {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, wrparam->bytes, reqctx->processed);
	} else {
		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
				  reqctx->src_ofst);
		reqctx->srcsg = ulp_walk.last_sg;
		reqctx->src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}

void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
			     struct cpl_rx_phys_dsgl *phys_cpl,
			     struct cipher_wr_param *wrparam,
			     unsigned short qid)
{
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct dsgl_walk dsgl_walk;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
	reqctx->dstsg = dsgl_walk.last_sg;
	reqctx->dst_ofst = dsgl_walk.last_sg_len;
	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}

void chcr_add_hash_src_ent(struct ahash_request *req,
			   struct ulptx_sgl *ulptx,
			   struct hash_wr_param *param)
{
	struct ulptx_walk ulp_walk;
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);

	if (reqctx->hctx_wr.imm) {
		u8 *buf = (u8 *)ulptx;

		if (param->bfr_len) {
			memcpy(buf, reqctx->reqbfr, param->bfr_len);
			buf += param->bfr_len;
		}

		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
				   sg_nents(reqctx->hctx_wr.srcsg), buf,
				   param->sg_len, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (param->bfr_len)
			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
					    reqctx->hctx_wr.dma_addr);
		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
				  param->sg_len, reqctx->hctx_wr.src_ofst);
		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}

int chcr_hash_dma_map(struct device *dev,
		      struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	int error = 0;

	if (!req->nbytes)
		return 0;
	error = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_TO_DEVICE);
	if (!error)
		return -ENOMEM;
	req_ctx->hctx_wr.is_sg_map = 1;
	return 0;
}

void chcr_hash_dma_unmap(struct device *dev,
			 struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return;

	dma_unmap_sg(dev, req->src, sg_nents(req->src),
		     DMA_TO_DEVICE);
	req_ctx->hctx_wr.is_sg_map = 0;
}

int chcr_cipher_dma_map(struct device *dev,
			struct ablkcipher_request *req)
{
	int error;

	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	return -ENOMEM;
}

void chcr_cipher_dma_unmap(struct device *dev,
			   struct ablkcipher_request *req)
{
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}
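/* set_msg_len() writes the CCM length field: the message length is stored
 * big-endian in the trailing bytes of the @csize-byte field, as RFC 3610
 * requires for the B0 block. E.g. csize == 4 and msglen == 0x1234 yields
 * 00 00 12 34 in the last four bytes.
 */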
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
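/* generate_b0() builds the CCM B0 block from the already formatted IV.
 * Per RFC 3610 the flags octet encodes L' in bits 0-2 (taken over from
 * the IV), (M - 2) / 2 in bits 3-5 (the 8 * ((m - 2) / 2) term below) and
 * the Adata bit when AAD is present; the trailing l bytes carry the
 * message length.
 */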
static int generate_b0(struct aead_request *req, u8 *ivptr,
		       unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, ivptr, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);

	return rc;
}
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}
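/* ccm_format_packet() lays out the 16-byte counter block. For RFC 4309
 * (IPsec ESP) the flags octet is fixed (L' = 3, i.e. a 4-byte length
 * field), followed by the 3-byte salt from setkey, the 8-byte per-packet
 * IV and a zeroed counter; for plain ccm(aes) the caller-supplied 16-byte
 * IV is used as-is. The AAD length, when nonzero, is stored right after
 * B0 in the scratch pad.
 */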
static int ccm_format_packet(struct aead_request *req,
			     u8 *ivptr,
			     unsigned int sub_type,
			     unsigned short op_type,
			     unsigned int assoclen)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		ivptr[0] = 3;
		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
		memcpy(ivptr + 4, req->iv, 8);
		memset(ivptr + 12, 0, 4);
	} else {
		memcpy(ivptr, req->iv, 16);
	}
	if (assoclen)
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
				htons(assoclen);

	rc = generate_b0(req, ivptr, op_type);
	/* zero the ctr value */
	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
	return rc;
}
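/* CCM inserts ccm_xtra bytes (the B0 block plus, when there is AAD, the
 * 2-byte AAD length field) in front of the payload, so every offset in
 * the SEC_PDU built below is shifted by ccm_xtra relative to the
 * GCM/authenc cases.
 */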
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int c_id = a_ctx(tfm)->tx_chan_id;
	unsigned int ccm_xtra;
	unsigned char tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(req->assoclen + IV + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
						 2, 1);
	sec_cpl->pldlen =
		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will always be a B0 block, so AAD start is always 1 */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
				1 + IV, IV + assoclen + ccm_xtra,
				req->assoclen + IV + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, IV >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					0, dst_size);
}
static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	return 0;
}
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
			       + (reqctx->op ? -authsize : authsize),
			       CHCR_DST_SG_SIZE, 0);
	dnents += MIN_CCM_SG; // For IV and B0
	dst_size = get_space_for_phys_dsgl(dnents);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	snents += MIN_CCM_SG; //For B0
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);

	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
	if (error)
		goto dstmap_fail;
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0, snents;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
			       (reqctx->op ? -authsize : authsize),
			       CHCR_DST_SG_SIZE, 0);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dnents += MIN_GCM_SG; // For IV
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
		(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {

		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	//Offset of tag from end
	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					a_ctx(tfm)->tx_chan_id, 2, 1);
	chcr_req->sec_cpl.pldlen =
		htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 + IV : 0,
					assoclen ? IV + assoclen : 0,
					req->assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
						temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	/* prepare a 16 byte iv */
	/* S A L T | IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(ivptr, aeadctx->salt, 4);
		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
	}
	*((unsigned int *)(ivptr + 12)) = htonl(0x01);

	ulptx = (struct ulptx_sgl *)(ivptr + 16);

	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
	reqctx->skb = skb;
	return skb;

err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
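/* Every hardware AEAD keeps a software fallback transform (sw_cipher) of
 * the same algorithm; the request size advertised to the crypto API must
 * therefore be large enough for either the driver's own request context
 * or a nested request for the fallback, whichever is bigger.
 */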
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				 sizeof(struct aead_request) +
				 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}

static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
	 * does not hold for SHA1, so the authsize == 12 check must come
	 * before the authsize == (maxauth >> 1) check.
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *) tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}

static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}

static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
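/* GCM setkey: besides loading the AES key, the GHASH subkey
 * H = AES_K(0^128) has to be placed in the key context. It is computed
 * here with a one-off software "aes-generic" cipher by encrypting a
 * zeroed block in place.
 */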
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	struct crypto_cipher *cipher;
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		aeadctx->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out1;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);

out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains auth and cipher key both*/
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use the authkey to generate
	 * h(ipad) and h(opad), so the authkey is not needed again.
	 * authkeylen is the size of the hash digest.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {

		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest*/
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digest to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
							0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct crypto_authenc_keys keys;
	int err;
	/* it contains auth and cipher key both*/
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
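/* chcr_aead_op() is the common submit path: it checks that the device is
 * still attached, applies backpressure when the crypto queue is full
 * (fail without CRYPTO_TFM_REQ_MAY_BACKLOG, -EBUSY with it), builds the
 * WR via the mode-specific constructor and hands it to the LLD.
 */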
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;
	int isfull = 0;
	struct chcr_dev *cdev;

	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}

	if (chcr_inc_wrcount(cdev)) {
		/* Detach state for CHCR means lldi or padap is freed.
		 * We cannot increment fallback here.
		 */
		return chcr_aead_fallback(req, reqctx->op);
	}

	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			chcr_dec_wrcount(cdev);
			return -ENOSPC;
		}
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);

	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
	}

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}
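/* Template table of every algorithm this driver exposes. The .type field
 * carries the crypto API type in the low bits plus a driver-private
 * subtype used to pick the WR constructor; chcr_register_alg() fills in
 * the common fields before registering each entry.
 */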
3732 static struct chcr_alg_template driver_algs
[] = {
3735 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_SUB_TYPE_CBC
,
3738 .cra_name
= "cbc(aes)",
3739 .cra_driver_name
= "cbc-aes-chcr",
3740 .cra_blocksize
= AES_BLOCK_SIZE
,
3741 .cra_init
= chcr_cra_init
,
3742 .cra_exit
= chcr_cra_exit
,
3743 .cra_u
.ablkcipher
= {
3744 .min_keysize
= AES_MIN_KEY_SIZE
,
3745 .max_keysize
= AES_MAX_KEY_SIZE
,
3746 .ivsize
= AES_BLOCK_SIZE
,
3747 .setkey
= chcr_aes_cbc_setkey
,
3748 .encrypt
= chcr_aes_encrypt
,
3749 .decrypt
= chcr_aes_decrypt
,
3754 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_SUB_TYPE_XTS
,
3757 .cra_name
= "xts(aes)",
3758 .cra_driver_name
= "xts-aes-chcr",
3759 .cra_blocksize
= AES_BLOCK_SIZE
,
3760 .cra_init
= chcr_cra_init
,
3762 .cra_u
.ablkcipher
= {
3763 .min_keysize
= 2 * AES_MIN_KEY_SIZE
,
3764 .max_keysize
= 2 * AES_MAX_KEY_SIZE
,
3765 .ivsize
= AES_BLOCK_SIZE
,
3766 .setkey
= chcr_aes_xts_setkey
,
3767 .encrypt
= chcr_aes_encrypt
,
3768 .decrypt
= chcr_aes_decrypt
,
3773 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_SUB_TYPE_CTR
,
3776 .cra_name
= "ctr(aes)",
3777 .cra_driver_name
= "ctr-aes-chcr",
3779 .cra_init
= chcr_cra_init
,
3780 .cra_exit
= chcr_cra_exit
,
3781 .cra_u
.ablkcipher
= {
3782 .min_keysize
= AES_MIN_KEY_SIZE
,
3783 .max_keysize
= AES_MAX_KEY_SIZE
,
3784 .ivsize
= AES_BLOCK_SIZE
,
3785 .setkey
= chcr_aes_ctr_setkey
,
3786 .encrypt
= chcr_aes_encrypt
,
3787 .decrypt
= chcr_aes_decrypt
,
3792 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
3793 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686
,
3796 .cra_name
= "rfc3686(ctr(aes))",
3797 .cra_driver_name
= "rfc3686-ctr-aes-chcr",
3799 .cra_init
= chcr_rfc3686_init
,
3800 .cra_exit
= chcr_cra_exit
,
3801 .cra_u
.ablkcipher
= {
3802 .min_keysize
= AES_MIN_KEY_SIZE
+
3803 CTR_RFC3686_NONCE_SIZE
,
3804 .max_keysize
= AES_MAX_KEY_SIZE
+
3805 CTR_RFC3686_NONCE_SIZE
,
3806 .ivsize
= CTR_RFC3686_IV_SIZE
,
3807 .setkey
= chcr_aes_rfc3686_setkey
,
3808 .encrypt
= chcr_aes_encrypt
,
3809 .decrypt
= chcr_aes_decrypt
,
3815 .type
= CRYPTO_ALG_TYPE_AHASH
,
3818 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3821 .cra_driver_name
= "sha1-chcr",
3822 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3827 .type
= CRYPTO_ALG_TYPE_AHASH
,
3830 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3832 .cra_name
= "sha256",
3833 .cra_driver_name
= "sha256-chcr",
3834 .cra_blocksize
= SHA256_BLOCK_SIZE
,
3839 .type
= CRYPTO_ALG_TYPE_AHASH
,
3842 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3844 .cra_name
= "sha224",
3845 .cra_driver_name
= "sha224-chcr",
3846 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3851 .type
= CRYPTO_ALG_TYPE_AHASH
,
3854 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
3856 .cra_name
= "sha384",
3857 .cra_driver_name
= "sha384-chcr",
3858 .cra_blocksize
= SHA384_BLOCK_SIZE
,
3863 .type
= CRYPTO_ALG_TYPE_AHASH
,
3866 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
3868 .cra_name
= "sha512",
3869 .cra_driver_name
= "sha512-chcr",
3870 .cra_blocksize
= SHA512_BLOCK_SIZE
,
3876 .type
= CRYPTO_ALG_TYPE_HMAC
,
3879 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3881 .cra_name
= "hmac(sha1)",
3882 .cra_driver_name
= "hmac-sha1-chcr",
3883 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3888 .type
= CRYPTO_ALG_TYPE_HMAC
,
3891 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3893 .cra_name
= "hmac(sha224)",
3894 .cra_driver_name
= "hmac-sha224-chcr",
3895 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3900 .type
= CRYPTO_ALG_TYPE_HMAC
,
3903 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3905 .cra_name
= "hmac(sha256)",
3906 .cra_driver_name
= "hmac-sha256-chcr",
3907 .cra_blocksize
= SHA256_BLOCK_SIZE
,
3912 .type
= CRYPTO_ALG_TYPE_HMAC
,
3915 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
3917 .cra_name
= "hmac(sha384)",
3918 .cra_driver_name
= "hmac-sha384-chcr",
3919 .cra_blocksize
= SHA384_BLOCK_SIZE
,
3924 .type
= CRYPTO_ALG_TYPE_HMAC
,
3927 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
3929 .cra_name
= "hmac(sha512)",
3930 .cra_driver_name
= "hmac-sha512-chcr",
3931 .cra_blocksize
= SHA512_BLOCK_SIZE
,
3935 /* Add AEAD Algorithms */
3937 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_GCM
,
3941 .cra_name
= "gcm(aes)",
3942 .cra_driver_name
= "gcm-aes-chcr",
3944 .cra_priority
= CHCR_AEAD_PRIORITY
,
3945 .cra_ctxsize
= sizeof(struct chcr_context
) +
3946 sizeof(struct chcr_aead_ctx
) +
3947 sizeof(struct chcr_gcm_ctx
),
3949 .ivsize
= GCM_AES_IV_SIZE
,
3950 .maxauthsize
= GHASH_DIGEST_SIZE
,
3951 .setkey
= chcr_gcm_setkey
,
3952 .setauthsize
= chcr_gcm_setauthsize
,
3956 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
,
3960 .cra_name
= "rfc4106(gcm(aes))",
3961 .cra_driver_name
= "rfc4106-gcm-aes-chcr",
3963 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
3964 .cra_ctxsize
= sizeof(struct chcr_context
) +
3965 sizeof(struct chcr_aead_ctx
) +
3966 sizeof(struct chcr_gcm_ctx
),
3969 .ivsize
= GCM_RFC4106_IV_SIZE
,
3970 .maxauthsize
= GHASH_DIGEST_SIZE
,
3971 .setkey
= chcr_gcm_setkey
,
3972 .setauthsize
= chcr_4106_4309_setauthsize
,
3976 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_CCM
,
3980 .cra_name
= "ccm(aes)",
3981 .cra_driver_name
= "ccm-aes-chcr",
3983 .cra_priority
= CHCR_AEAD_PRIORITY
,
3984 .cra_ctxsize
= sizeof(struct chcr_context
) +
3985 sizeof(struct chcr_aead_ctx
),
3988 .ivsize
= AES_BLOCK_SIZE
,
3989 .maxauthsize
= GHASH_DIGEST_SIZE
,
3990 .setkey
= chcr_aead_ccm_setkey
,
3991 .setauthsize
= chcr_ccm_setauthsize
,
3995 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
,
3999 .cra_name
= "rfc4309(ccm(aes))",
4000 .cra_driver_name
= "rfc4309-ccm-aes-chcr",
4002 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
4003 .cra_ctxsize
= sizeof(struct chcr_context
) +
4004 sizeof(struct chcr_aead_ctx
),
4008 .maxauthsize
= GHASH_DIGEST_SIZE
,
4009 .setkey
= chcr_aead_rfc4309_setkey
,
4010 .setauthsize
= chcr_4106_4309_setauthsize
,
4014 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4018 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
4020 "authenc-hmac-sha1-cbc-aes-chcr",
4021 .cra_blocksize
= AES_BLOCK_SIZE
,
4022 .cra_priority
= CHCR_AEAD_PRIORITY
,
4023 .cra_ctxsize
= sizeof(struct chcr_context
) +
4024 sizeof(struct chcr_aead_ctx
) +
4025 sizeof(struct chcr_authenc_ctx
),
4028 .ivsize
= AES_BLOCK_SIZE
,
4029 .maxauthsize
= SHA1_DIGEST_SIZE
,
4030 .setkey
= chcr_authenc_setkey
,
4031 .setauthsize
= chcr_authenc_setauthsize
,
4035 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4040 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
4042 "authenc-hmac-sha256-cbc-aes-chcr",
4043 .cra_blocksize
= AES_BLOCK_SIZE
,
4044 .cra_priority
= CHCR_AEAD_PRIORITY
,
4045 .cra_ctxsize
= sizeof(struct chcr_context
) +
4046 sizeof(struct chcr_aead_ctx
) +
4047 sizeof(struct chcr_authenc_ctx
),
4050 .ivsize
= AES_BLOCK_SIZE
,
4051 .maxauthsize
= SHA256_DIGEST_SIZE
,
4052 .setkey
= chcr_authenc_setkey
,
4053 .setauthsize
= chcr_authenc_setauthsize
,
4057 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4061 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
4063 "authenc-hmac-sha224-cbc-aes-chcr",
4064 .cra_blocksize
= AES_BLOCK_SIZE
,
4065 .cra_priority
= CHCR_AEAD_PRIORITY
,
4066 .cra_ctxsize
= sizeof(struct chcr_context
) +
4067 sizeof(struct chcr_aead_ctx
) +
4068 sizeof(struct chcr_authenc_ctx
),
4070 .ivsize
= AES_BLOCK_SIZE
,
4071 .maxauthsize
= SHA224_DIGEST_SIZE
,
4072 .setkey
= chcr_authenc_setkey
,
4073 .setauthsize
= chcr_authenc_setauthsize
,
4077 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4081 .cra_name
= "authenc(hmac(sha384),cbc(aes))",
4083 "authenc-hmac-sha384-cbc-aes-chcr",
4084 .cra_blocksize
= AES_BLOCK_SIZE
,
4085 .cra_priority
= CHCR_AEAD_PRIORITY
,
4086 .cra_ctxsize
= sizeof(struct chcr_context
) +
4087 sizeof(struct chcr_aead_ctx
) +
4088 sizeof(struct chcr_authenc_ctx
),
4091 .ivsize
= AES_BLOCK_SIZE
,
4092 .maxauthsize
= SHA384_DIGEST_SIZE
,
4093 .setkey
= chcr_authenc_setkey
,
4094 .setauthsize
= chcr_authenc_setauthsize
,
4098 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4102 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
4104 "authenc-hmac-sha512-cbc-aes-chcr",
4105 .cra_blocksize
= AES_BLOCK_SIZE
,
4106 .cra_priority
= CHCR_AEAD_PRIORITY
,
4107 .cra_ctxsize
= sizeof(struct chcr_context
) +
4108 sizeof(struct chcr_aead_ctx
) +
4109 sizeof(struct chcr_authenc_ctx
),
4112 .ivsize
= AES_BLOCK_SIZE
,
4113 .maxauthsize
= SHA512_DIGEST_SIZE
,
4114 .setkey
= chcr_authenc_setkey
,
4115 .setauthsize
= chcr_authenc_setauthsize
,
4119 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_NULL
,
4123 .cra_name
= "authenc(digest_null,cbc(aes))",
4125 "authenc-digest_null-cbc-aes-chcr",
4126 .cra_blocksize
= AES_BLOCK_SIZE
,
4127 .cra_priority
= CHCR_AEAD_PRIORITY
,
4128 .cra_ctxsize
= sizeof(struct chcr_context
) +
4129 sizeof(struct chcr_aead_ctx
) +
4130 sizeof(struct chcr_authenc_ctx
),
4133 .ivsize
= AES_BLOCK_SIZE
,
4135 .setkey
= chcr_aead_digest_null_setkey
,
4136 .setauthsize
= chcr_authenc_null_setauthsize
,
4140 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4144 .cra_name
= "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4146 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4148 .cra_priority
= CHCR_AEAD_PRIORITY
,
4149 .cra_ctxsize
= sizeof(struct chcr_context
) +
4150 sizeof(struct chcr_aead_ctx
) +
4151 sizeof(struct chcr_authenc_ctx
),
4154 .ivsize
= CTR_RFC3686_IV_SIZE
,
4155 .maxauthsize
= SHA1_DIGEST_SIZE
,
4156 .setkey
= chcr_authenc_setkey
,
4157 .setauthsize
= chcr_authenc_setauthsize
,
4161 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4166 .cra_name
= "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4168 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4170 .cra_priority
= CHCR_AEAD_PRIORITY
,
4171 .cra_ctxsize
= sizeof(struct chcr_context
) +
4172 sizeof(struct chcr_aead_ctx
) +
4173 sizeof(struct chcr_authenc_ctx
),
4176 .ivsize
= CTR_RFC3686_IV_SIZE
,
4177 .maxauthsize
= SHA256_DIGEST_SIZE
,
4178 .setkey
= chcr_authenc_setkey
,
4179 .setauthsize
= chcr_authenc_setauthsize
,
4183 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4187 .cra_name
= "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4189 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4191 .cra_priority
= CHCR_AEAD_PRIORITY
,
4192 .cra_ctxsize
= sizeof(struct chcr_context
) +
4193 sizeof(struct chcr_aead_ctx
) +
4194 sizeof(struct chcr_authenc_ctx
),
4196 .ivsize
= CTR_RFC3686_IV_SIZE
,
4197 .maxauthsize
= SHA224_DIGEST_SIZE
,
4198 .setkey
= chcr_authenc_setkey
,
4199 .setauthsize
= chcr_authenc_setauthsize
,
4203 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4207 .cra_name
= "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4209 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4211 .cra_priority
= CHCR_AEAD_PRIORITY
,
4212 .cra_ctxsize
= sizeof(struct chcr_context
) +
4213 sizeof(struct chcr_aead_ctx
) +
4214 sizeof(struct chcr_authenc_ctx
),
4217 .ivsize
= CTR_RFC3686_IV_SIZE
,
4218 .maxauthsize
= SHA384_DIGEST_SIZE
,
4219 .setkey
= chcr_authenc_setkey
,
4220 .setauthsize
= chcr_authenc_setauthsize
,
4224 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4228 .cra_name
= "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4230 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4232 .cra_priority
= CHCR_AEAD_PRIORITY
,
4233 .cra_ctxsize
= sizeof(struct chcr_context
) +
4234 sizeof(struct chcr_aead_ctx
) +
4235 sizeof(struct chcr_authenc_ctx
),
4238 .ivsize
= CTR_RFC3686_IV_SIZE
,
4239 .maxauthsize
= SHA512_DIGEST_SIZE
,
4240 .setkey
= chcr_authenc_setkey
,
4241 .setauthsize
= chcr_authenc_setauthsize
,
4245 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_NULL
,
4249 .cra_name
= "authenc(digest_null,rfc3686(ctr(aes)))",
4251 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4253 .cra_priority
= CHCR_AEAD_PRIORITY
,
4254 .cra_ctxsize
= sizeof(struct chcr_context
) +
4255 sizeof(struct chcr_aead_ctx
) +
4256 sizeof(struct chcr_authenc_ctx
),
4259 .ivsize
= CTR_RFC3686_IV_SIZE
,
4261 .setkey
= chcr_aead_digest_null_setkey
,
4262 .setauthsize
= chcr_authenc_null_setauthsize
,
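/*
 * Illustrative sketch (not part of the original driver): the entries above
 * are reached by kernel consumers through the generic crypto API by
 * cra_name, and CHCR_AEAD_PRIORITY decides whether this driver is picked
 * over the software implementation. The function below is a hypothetical
 * example of such a consumer, not code from this file.
 */
static int __maybe_unused chcr_example_alloc_authenc(void)
{
	struct crypto_aead *tfm;

	/* Request by generic name; the highest-priority provider wins. */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* A real caller would now set the key and authsize and queue
	 * aead_request operations; freeing the tfm is enough here.
	 */
	crypto_free_aead(tfm);
	return 0;
}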
/*
 *	chcr_unregister_alg - Deregister crypto algorithms with
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		/* Clear the flag unconditionally so that a later
		 * chcr_register_alg() re-registers every entry.
		 */
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
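/*
 * Illustrative sketch (not in the original file): cra_ctxsize, set from the
 * SZ_AHASH_* macros above, is the per-transform context the crypto core
 * allocates, while halg.statesize is sized to the request context so that
 * export()/import() can snapshot a partial hash. A driver callback would
 * reach the context roughly as below; the helper name is hypothetical.
 */
static inline struct chcr_context *
chcr_example_ahash_ctx(struct crypto_ahash *tfm)
{
	/* Memory of size SZ_AHASH_CTX or SZ_AHASH_H_CTX, zeroed by the core. */
	return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
}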
/*
 *	chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			/* Callbacks common to every ahash entry; HMAC and
			 * plain-hash setups diverge below.
			 */
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
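/*
 * Registration above is all-or-nothing: the first failure jumps to
 * register_err, which unwinds every already-registered entry through
 * chcr_unregister_alg() before propagating the error.
 */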
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After this
 *	the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}
/*
 *	stop_crypto - Deregister all the crypto algorithms with kernel.
 *	This should be called once when the last device goes down. After this
 *	the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
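/*
 * Illustrative sketch (hypothetical, not from this file): per the comments
 * above, the core driver is expected to call start_crypto() when the first
 * adapter comes up and stop_crypto() when the last one goes away. One way
 * to express that contract is a simple device count; the names below are
 * made up for illustration.
 */
static atomic_t chcr_example_dev_count = ATOMIC_INIT(0);

static int __maybe_unused chcr_example_device_up(void)
{
	/* Only the first device triggers algorithm registration. */
	if (atomic_inc_return(&chcr_example_dev_count) == 1)
		return start_crypto();
	return 0;
}

static void __maybe_unused chcr_example_device_down(void)
{
	/* Deregister once the last device is gone. */
	if (atomic_dec_and_test(&chcr_example_dev_count))
		stop_crypto();
}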