/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
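/*
 * Space-accounting tables used when sizing a work request: entry i gives
 * the bytes of WR space consumed once i scatter/gather entries have been
 * placed in the ULPTX source SGL (sgl_ent_len) or the destination
 * physical DSGL (dsgl_ent_len). chcr_sg_ent_in_wr() and
 * chcr_hash_ent_in_wr() index these tables to decide how much payload
 * fits in a single WR; the values appear to track the hardware
 * descriptor layout.
 */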
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};
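/*
 * AES key-schedule round constants (Rcon[1..11]), stored in the high
 * byte of each word to match the big-endian word handling in
 * get_aes_decrypt_key() below.
 */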
static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err);
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
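/*
 * Software fallback for AEAD tag verification: compare the tag that
 * arrived with the ciphertext against the tag the hardware computed
 * (returned in the CPL_FW6_PLD completion payload). crypto_memneq() is
 * used for both comparisons so the check is constant-time.
 */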
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	int err = 0;

	spin_lock_bh(&dev->lock_chcr_dev);
	if (dev->state == CHCR_DETACH)
		err = 1;
	else
		atomic_inc(&dev->inflight);

	spin_unlock_bh(&dev->lock_chcr_dev);

	return err;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}
static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);

	return err;
}
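/*
 * Standard AES key expansion (RotWord/SubWord/Rcon) run in software; the
 * last Nk round-key words are then written out in reverse order. This
 * "reversed round key" (rrkey) is what the key context carries for modes
 * where the engine starts decryption from the final round key (see
 * generate_copy_rrkey() below).
 */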
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}
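/*
 * The exported shash state words are in CPU byte order, while the crypto
 * engine expects them big-endian. SHA-384/512 state words are 64-bit,
 * all other digests use 32-bit words, hence the two loops below.
 */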
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}
static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
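/*
 * The "walker" helper families below build the DMA gather lists embedded
 * in a work request: dsgl_walk_*() fills the destination CPL_RX_PHYS_DSGL
 * (address/length pairs in groups of eight), and ulptx_walk_*() fills the
 * ULPTX source SGL (len0/addr0 followed by ulptx_sge_pair entries). Both
 * split any scatterlist element larger than CHCR_DST_SG_SIZE or
 * CHCR_SRC_SG_SIZE into multiple hardware entries.
 */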
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}
static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents = 1;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents += 1;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}
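/*
 * Copy key material into the WR key context in the layout the cipher
 * mode needs: CBC decryption uses just the reversed round key, while XTS
 * uses key2 (stored in the upper half of ablkctx->key) followed by the
 * reversed round key derived from key1.
 */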
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_sync_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
				!!lcb, ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: request parameters: the cipher request, the ingress qid
 *		  where the response of this WR should be received, and the
 *		  number of bytes to process in this WR.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_blkcipher_req_ctx *reqctx =
		ablkcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(c_ctx(tfm)->dev);

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
				     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->info, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}
static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	int err = 0;

	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}
static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
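/*
 * 128-bit big-endian addition of a block count to the counter block,
 * propagating the carry from the least significant 32-bit word upward.
 * For example, adding 1 to a counter whose low word is 0xffffffff wraps
 * that word to 0 and carries into the next word.
 */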
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
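/*
 * The engine increments only the low 32 bits of the CTR counter block,
 * so one request must not cross a 2^32-block boundary. Trim "bytes" to
 * the blocks remaining before the low word wraps; the leftover data is
 * sent in a follow-on WR after chcr_update_cipher_iv() fixes up the IV.
 */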
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* Number of blocks that can be processed
			    * without overflow
			    */
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
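/*
 * Fast-forward the XTS tweak over the blocks already processed: encrypt
 * the IV with key2, multiply by x^round in GF(2^128) (gf128mul_x8_ble()
 * covers eight doublings per call), and, unless this is the final IV,
 * decrypt the result back so the engine regenerates the same tweak
 * sequence on the next WR.
 */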
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_cipher *cipher;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	cipher = ablkctx->aes_generic;
	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret)
		goto out;
	crypto_cipher_encrypt_one(cipher, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		crypto_cipher_decrypt_one(cipher, iv, iv);
out:
	return ret;
}
static int chcr_update_cipher_iv(struct ablkcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/*Updated before sending last WR*/
			memcpy(iv, req->info, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * stays constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/*Already updated for Decrypt*/
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);

	}
	return ret;
}
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	int bytes;

	if (err)
		goto unmap;
	if (req->nbytes == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/*CTR mode counter overflow*/
		bytes = req->nbytes - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   req->info,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
	return err;
}
static int process_cipher(struct ablkcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;

	reqctx->processed = 0;
	if (!req->info)
		goto error;
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes == 0) ||
	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					     AES_MIN_KEY_SIZE +
					     sizeof(struct cpl_rx_phys_dsgl) +
					     /*Min dsgl size*/
					     32))) {
		/* Can be sent as Imm*/
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->nbytes,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->nbytes;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->nbytes;
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->info, bytes);
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

	} else {
		memcpy(reqctx->iv, req->info, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   reqctx->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err, isfull = 0;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -ENOSPC;
			goto error;
		}
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err, isfull = 0;

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}
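/*
 * Per-tfm queue assignment: Tx channels are handed out round-robin via
 * dev->tx_channel_id, and the Rx/Tx queue indices within a channel are
 * derived from the CPU allocating the tfm, spreading contexts across the
 * per-channel queue sets.
 */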
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	unsigned int id;
	int txq_perchan, txq_idx, ntxq;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_chan_id = ctx->dev->tx_channel_id;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		spin_unlock(&ctx->dev->lock_chcr_dev);
		rxq_idx = ctx->tx_chan_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->tx_chan_id * txq_perchan;
		txq_idx += id % txq_perchan;
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		/* Channel Id used by SGE to forward packet to Host.
		 * Same value should be used in cpl_fw6_pld RSS_CH field
		 * by FW. Driver programs PCI channel ID to be used in fw
		 * at the time of queue allocation with value "pi->tx_chan"
		 */
		ctx->pci_chan_id = txq_idx / txq_perchan;
	}
out:
	return err;
}
static int chcr_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}

	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
		/* To update tweak*/
		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
		if (IS_ERR(ablkctx->aes_generic)) {
			pr_err("failed to allocate aes cipher for tweak\n");
			return PTR_ERR(ablkctx->aes_generic);
		}
	} else {
		ablkctx->aes_generic = NULL;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp().
	 */
	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static void chcr_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_sync_skcipher(ablkctx->sw_cipher);
	if (ablkctx->aes_generic)
		crypto_free_cipher(ablkctx->aes_generic);
}
static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}
/**
 *	create_hash_wr - Create hash work request
 *	@req: hash request
 *	@param: work request parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					     param->alg_prm.mk_size, 0,
					     param->opad_needed,
					     ((param->kctx_len +
					      sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to max wr size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* Detach state for CHCR means lldi or padap is freed. Increasing
	 * inflight count for dev guarantees that lldi and padap is valid
	 */
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			error = -ENOSPC;
			goto err;
		}
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
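/*
 * Build the final SHA padding block by hand: a 0x80 byte, zero fill, and
 * the total message length in bits as a big-endian 64-bit value at the
 * end of the block (offset 56 for 64-byte blocks, 120 for 128-byte
 * blocks). scmd1 is a byte count, hence the << 3.
 */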
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	int error = -EINVAL;

	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	chcr_init_hctx_per_wr(req_ctx);
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			error = -ENOSPC;
			goto err;
		}
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
			       params.sg_len;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			error = -ENOSPC;
			goto err;
		}
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;
	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_dev *dev = h_ctx(tfm)->dev;

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
}
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		chcr_handle_cipher_resp(ablkcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(req_ctx);
	return 0;
}
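/*
 * HMAC key processing per RFC 2104: a key longer than the block size is
 * first hashed down to the digest size; ipad/opad are then formed by
 * XORing the zero-padded key with repeated 0x36/0x5c (IPAD_DATA and
 * OPAD_DATA). Only the partial hashes of the two pads are retained, so
 * the raw key never reaches the hardware.
 */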
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* use the key to calculate the ipad and opad. ipad will be sent with
	 * the first request's data. opad will be sent with the final hash
	 * result. ipad is stored in hmacctx->ipad and opad in hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	ablkctx->key_ctx_hdr =
		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				 CHCR_KEYCTX_NO_KEY, 1,
				 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);

	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}

inline void chcr_aead_common_exit(struct aead_request *req)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
}

static int chcr_aead_common_init(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;

	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;
	if (reqctx->op && req->cryptlen < authsize)
		goto err;
	if (reqctx->b0_len)
		reqctx->scratch_pad = reqctx->iv + IV;
	else
		reqctx->scratch_pad = NULL;

	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				  reqctx->op);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	return 0;
err:
	return error;
}

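/*
 * A request is bounced to the software fallback when the hardware limits
 * would be exceeded: a zero-length payload, too many destination SGL
 * entries, AAD larger than the adapter maximum, or a work request longer
 * than SGE_MAX_WR_LEN.
 */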
static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
				   int aadmax, int wrlen,
				   unsigned short op_type)
{
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
	    dst_nents > MAX_DSGL_ENT ||
	    (req->assoclen > aadmax) ||
	    (wrlen > SGE_MAX_WR_LEN))
		return 1;
	return 0;
}

static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
		crypto_aead_encrypt(subreq);
}

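/*
 * The work request built below is laid out as: transport header and
 * sec_cpl, then the key context, then the destination PHYS_DSGL, then the
 * 16-byte IV, then the source (copied inline when reqctx->imm, otherwise
 * referenced through an ULPTX SGL). The pointer arithmetic on
 * phys_cpl/ivptr/ulptx follows that layout.
 */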
static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
	unsigned int kctx_len = 0, dnents, snents;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (req->cryptlen == 0)
		return NULL;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
		null = 1;

	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
	dnents += MIN_AUTH_SG; // For IV
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
		SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
		: (sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * Input order is AAD, IV and payload, where the IV must be included
	 * as part of the authdata. All other fields are filled according to
	 * the hardware spec.
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					null ? 0 : 1 + IV,
					null ? 0 : IV + req->assoclen,
					req->assoclen + IV + 1,
					(temp & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					temp & 0xF,
					null ? 0 : req->assoclen + IV + 1,
					temp, temp);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
	else
		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
					temp,
					actx->auth_mode, aeadctx->hmac_ctrl,
					IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (reqctx->op == CHCR_ENCRYPT_OP ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);
		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(ivptr, req->iv, IV);
	}
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
err:
	chcr_aead_common_exit(req);

	return ERR_PTR(error);
}

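/*
 * DMA mapping policy: an in-place request (src == dst) maps the
 * scatterlist once bidirectionally; otherwise src is mapped to-device and
 * dst from-device, with src unwound if mapping dst fails. The IV (plus B0
 * for CCM) lives in the request context and is mapped separately as a
 * single buffer.
 */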
int chcr_aead_dma_map(struct device *dev,
		      struct aead_request *req,
		      unsigned short op_type)
{
	int error = -EINVAL;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
				-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return 0;
	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, reqctx->iv_dma))
		return -ENOMEM;
	if (reqctx->b0_len)
		reqctx->b0_dma = reqctx->iv_dma + IV;
	else
		reqctx->b0_dma = 0;
	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
	return -ENOMEM;
}

void chcr_aead_dma_unmap(struct device *dev,
			 struct aead_request *req,
			 unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
				-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return;

	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}

void chcr_add_aead_src_ent(struct aead_request *req,
			   struct ulptx_sgl *ulptx)
{
	struct ulptx_walk ulp_walk;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	if (reqctx->imm) {
		u8 *buf = (u8 *)ulptx;

		if (reqctx->b0_len) {
			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
			buf += reqctx->b0_len;
		}
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, req->cryptlen + req->assoclen, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (reqctx->b0_len)
			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
					    reqctx->b0_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
				  req->assoclen, 0);
		ulptx_walk_end(&ulp_walk);
	}
}

void chcr_add_aead_dst_ent(struct aead_request *req,
			   struct cpl_rx_phys_dsgl *phys_cpl,
			   unsigned short qid)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct dsgl_walk dsgl_walk;
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct chcr_context *ctx = a_ctx(tfm);
	u32 temp;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
	temp = req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}

void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
			     void *ulptx,
			     struct cipher_wr_param *wrparam)
{
	struct ulptx_walk ulp_walk;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	u8 *buf = ulptx;

	memcpy(buf, reqctx->iv, IV);
	buf += IV;
	if (reqctx->imm) {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, wrparam->bytes, reqctx->processed);
	} else {
		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
				  reqctx->src_ofst);
		reqctx->srcsg = ulp_walk.last_sg;
		reqctx->src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}

void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
			     struct cpl_rx_phys_dsgl *phys_cpl,
			     struct cipher_wr_param *wrparam,
			     unsigned short qid)
{
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct dsgl_walk dsgl_walk;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
	reqctx->dstsg = dsgl_walk.last_sg;
	reqctx->dst_ofst = dsgl_walk.last_sg_len;
	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}

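/*
 * As with the AEAD source path above, hash input is either copied inline
 * into the work request (hctx_wr.imm) or referenced through an ULPTX SGL,
 * with any buffered partial block (reqbfr) emitted ahead of the
 * scatterlist data.
 */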
void chcr_add_hash_src_ent(struct ahash_request *req,
			   struct ulptx_sgl *ulptx,
			   struct hash_wr_param *param)
{
	struct ulptx_walk ulp_walk;
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);

	if (reqctx->hctx_wr.imm) {
		u8 *buf = (u8 *)ulptx;

		if (param->bfr_len) {
			memcpy(buf, reqctx->reqbfr, param->bfr_len);
			buf += param->bfr_len;
		}

		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
				   sg_nents(reqctx->hctx_wr.srcsg), buf,
				   param->sg_len, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (param->bfr_len)
			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
					    reqctx->hctx_wr.dma_addr);
		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
				  param->sg_len, reqctx->hctx_wr.src_ofst);
		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}

int chcr_hash_dma_map(struct device *dev,
		      struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	int error = 0;

	if (!req->nbytes)
		return 0;
	error = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_TO_DEVICE);
	if (!error)
		return -ENOMEM;
	req_ctx->hctx_wr.is_sg_map = 1;
	return 0;
}

void chcr_hash_dma_unmap(struct device *dev,
			 struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return;

	dma_unmap_sg(dev, req->src, sg_nents(req->src),
		     DMA_TO_DEVICE);
	req_ctx->hctx_wr.is_sg_map = 0;
}

int chcr_cipher_dma_map(struct device *dev,
			struct ablkcipher_request *req)
{
	int error;

	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	return -ENOMEM;
}

void chcr_cipher_dma_unmap(struct device *dev,
			   struct ablkcipher_request *req)
{
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}

static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

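/*
 * generate_b0() builds the CCM B0 block: byte 0 carries the flags (bit 6 =
 * adata present, bits 3-5 = (M - 2) / 2 for a tag of M bytes, bits 0-2 =
 * L'), followed by the nonce, with the trailing L bytes holding the
 * message length written by set_msg_len() above.
 */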
static int generate_b0(struct aead_request *req, u8 *ivptr,
		       unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, ivptr, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);

	return rc;
}

static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}

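/*
 * ccm_format_packet() writes the counter block into the work request: for
 * RFC4309 the flags byte is fixed and the nonce is the salt (taken from
 * the key) plus the 8-byte IV; otherwise the caller's 16-byte IV is used
 * as-is. The counter field is then zeroed so the hardware starts the CTR
 * stream at zero.
 */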
static int ccm_format_packet(struct aead_request *req,
			     u8 *ivptr,
			     unsigned int sub_type,
			     unsigned short op_type,
			     unsigned int assoclen)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		ivptr[0] = 3;
		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
		memcpy(ivptr + 4, req->iv, 8);
		memset(ivptr + 12, 0, 4);
	} else {
		memcpy(ivptr, req->iv, 16);
	}
	if (assoclen)
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
				htons(assoclen);

	rc = generate_b0(req, ivptr, op_type);
	/* zero the ctr value */
	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
	return rc;
}

static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int c_id = a_ctx(tfm)->tx_chan_id;
	unsigned int ccm_xtra;
	unsigned char tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(req->assoclen + IV + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
					 2, 1);
	sec_cpl->pldlen =
		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will be a B0 always, so AAD start is always 1 */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					1 + IV, IV + assoclen + ccm_xtra,
					req->assoclen + IV + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, IV >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					0, dst_size);
}

static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	return 0;
}

static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
			+ (reqctx->op ? -authsize : authsize),
			CHCR_DST_SG_SIZE, 0);
	dnents += MIN_CCM_SG; // For IV and B0
	dst_size = get_space_for_phys_dsgl(dnents);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	snents += MIN_CCM_SG; // For B0
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
	if (error)
		goto dstmap_fail;
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}

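/*
 * For GCM the hardware consumes a full 16-byte J0-style IV: 4 bytes of
 * salt (RFC4106, taken from the end of the key) or none, the transmitted
 * IV, and a big-endian 0x00000001 counter in the last word, assembled in
 * create_gcm_wr() below.
 */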
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0, snents;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
				(reqctx->op ? -authsize : authsize),
				CHCR_DST_SG_SIZE, 0);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dnents += MIN_GCM_SG; // For IV
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
		SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
		(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	//Offset of tag from end
	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					a_ctx(tfm)->tx_chan_id, 2, 1);
	chcr_req->sec_cpl.pldlen =
		htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 + IV : 0,
					assoclen ? IV + assoclen : 0,
					req->assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
						temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	/* prepare a 16 byte iv */
	/* S   A   L  T |  IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(ivptr, aeadctx->salt, 4);
		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
	}
	*((unsigned int *)(ivptr + 12)) = htonl(0x01);

	ulptx = (struct ulptx_sgl *)(ivptr + 16);

	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
	reqctx->skb = skb;
	return skb;

err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}

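/*
 * Every AEAD transform keeps a software implementation of the same
 * algorithm (sw_cipher), so requests the hardware cannot handle, or that
 * arrive while the device is detaching, can be redirected through
 * chcr_aead_fallback().
 */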
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				sizeof(struct aead_request) +
				crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}

static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* The SHA1 authsize in IPsec is 12 rather than 10, i.e.
	 * maxauthsize / 2 does not hold for SHA1, so the authsize == 12
	 * check must come before the authsize == (maxauth >> 1) check.
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}

static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}

static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}

static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	struct crypto_cipher *cipher;
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		aeadctx->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out1;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);

out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}

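/*
 * For authenc(hmac(shaN),cbc/ctr(aes)) the key context holds the AES key
 * (or its decrypt round keys) followed by the precomputed inner and outer
 * pad digests (h_iopad); chcr_authenc_setkey() below derives all three
 * from the single authenc-encoded key blob.
 */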
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains both the auth and cipher keys */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use the authkey to generate
	 * h(ipad) and h(opad), so the authkey is not needed again.
	 * authkeylen is the size of the hash digest.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {

		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest*/
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digest to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
						0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}

static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct crypto_authenc_keys keys;
	int err;
	/* it contains both the auth and cipher keys */
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;
	int isfull = 0;
	struct chcr_dev *cdev;

	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}

	if (chcr_inc_wrcount(cdev)) {
		/* The detach state for CHCR means lldi or padap has been
		 * freed, so we cannot increment the fallback counter here.
		 */
		return chcr_aead_fallback(req, reqctx->op);
	}

	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			chcr_dec_wrcount(cdev);
			return -ENOSPC;
		}
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);

	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
	}

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}

static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}

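/*
 * Template table for every algorithm this driver exposes; the cipher,
 * hash and AEAD entries are completed (flags, module, context sizes) and
 * registered with the crypto API by chcr_register_alg().
 */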
3739 static struct chcr_alg_template driver_algs
[] = {
3742 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_SUB_TYPE_CBC
,
3745 .cra_name
= "cbc(aes)",
3746 .cra_driver_name
= "cbc-aes-chcr",
3747 .cra_blocksize
= AES_BLOCK_SIZE
,
3748 .cra_init
= chcr_cra_init
,
3749 .cra_exit
= chcr_cra_exit
,
3750 .cra_u
.ablkcipher
= {
3751 .min_keysize
= AES_MIN_KEY_SIZE
,
3752 .max_keysize
= AES_MAX_KEY_SIZE
,
3753 .ivsize
= AES_BLOCK_SIZE
,
3754 .setkey
= chcr_aes_cbc_setkey
,
3755 .encrypt
= chcr_aes_encrypt
,
3756 .decrypt
= chcr_aes_decrypt
,
3761 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_SUB_TYPE_XTS
,
3764 .cra_name
= "xts(aes)",
3765 .cra_driver_name
= "xts-aes-chcr",
3766 .cra_blocksize
= AES_BLOCK_SIZE
,
3767 .cra_init
= chcr_cra_init
,
3769 .cra_u
.ablkcipher
= {
3770 .min_keysize
= 2 * AES_MIN_KEY_SIZE
,
3771 .max_keysize
= 2 * AES_MAX_KEY_SIZE
,
3772 .ivsize
= AES_BLOCK_SIZE
,
3773 .setkey
= chcr_aes_xts_setkey
,
3774 .encrypt
= chcr_aes_encrypt
,
3775 .decrypt
= chcr_aes_decrypt
,
3780 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_SUB_TYPE_CTR
,
3783 .cra_name
= "ctr(aes)",
3784 .cra_driver_name
= "ctr-aes-chcr",
3786 .cra_init
= chcr_cra_init
,
3787 .cra_exit
= chcr_cra_exit
,
3788 .cra_u
.ablkcipher
= {
3789 .min_keysize
= AES_MIN_KEY_SIZE
,
3790 .max_keysize
= AES_MAX_KEY_SIZE
,
3791 .ivsize
= AES_BLOCK_SIZE
,
3792 .setkey
= chcr_aes_ctr_setkey
,
3793 .encrypt
= chcr_aes_encrypt
,
3794 .decrypt
= chcr_aes_decrypt
,
3799 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
3800 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686
,
3803 .cra_name
= "rfc3686(ctr(aes))",
3804 .cra_driver_name
= "rfc3686-ctr-aes-chcr",
3806 .cra_init
= chcr_rfc3686_init
,
3807 .cra_exit
= chcr_cra_exit
,
3808 .cra_u
.ablkcipher
= {
3809 .min_keysize
= AES_MIN_KEY_SIZE
+
3810 CTR_RFC3686_NONCE_SIZE
,
3811 .max_keysize
= AES_MAX_KEY_SIZE
+
3812 CTR_RFC3686_NONCE_SIZE
,
3813 .ivsize
= CTR_RFC3686_IV_SIZE
,
3814 .setkey
= chcr_aes_rfc3686_setkey
,
3815 .encrypt
= chcr_aes_encrypt
,
3816 .decrypt
= chcr_aes_decrypt
,
3822 .type
= CRYPTO_ALG_TYPE_AHASH
,
3825 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3828 .cra_driver_name
= "sha1-chcr",
3829 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3834 .type
= CRYPTO_ALG_TYPE_AHASH
,
3837 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3839 .cra_name
= "sha256",
3840 .cra_driver_name
= "sha256-chcr",
3841 .cra_blocksize
= SHA256_BLOCK_SIZE
,
3846 .type
= CRYPTO_ALG_TYPE_AHASH
,
3849 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3851 .cra_name
= "sha224",
3852 .cra_driver_name
= "sha224-chcr",
3853 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3858 .type
= CRYPTO_ALG_TYPE_AHASH
,
3861 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
3863 .cra_name
= "sha384",
3864 .cra_driver_name
= "sha384-chcr",
3865 .cra_blocksize
= SHA384_BLOCK_SIZE
,
3870 .type
= CRYPTO_ALG_TYPE_AHASH
,
3873 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
3875 .cra_name
= "sha512",
3876 .cra_driver_name
= "sha512-chcr",
3877 .cra_blocksize
= SHA512_BLOCK_SIZE
,
3883 .type
= CRYPTO_ALG_TYPE_HMAC
,
3886 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3888 .cra_name
= "hmac(sha1)",
3889 .cra_driver_name
= "hmac-sha1-chcr",
3890 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3895 .type
= CRYPTO_ALG_TYPE_HMAC
,
3898 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3900 .cra_name
= "hmac(sha224)",
3901 .cra_driver_name
= "hmac-sha224-chcr",
3902 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3907 .type
= CRYPTO_ALG_TYPE_HMAC
,
3910 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3912 .cra_name
= "hmac(sha256)",
3913 .cra_driver_name
= "hmac-sha256-chcr",
3914 .cra_blocksize
= SHA256_BLOCK_SIZE
,
3919 .type
= CRYPTO_ALG_TYPE_HMAC
,
3922 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
3924 .cra_name
= "hmac(sha384)",
3925 .cra_driver_name
= "hmac-sha384-chcr",
3926 .cra_blocksize
= SHA384_BLOCK_SIZE
,
3931 .type
= CRYPTO_ALG_TYPE_HMAC
,
3934 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
3936 .cra_name
= "hmac(sha512)",
3937 .cra_driver_name
= "hmac-sha512-chcr",
3938 .cra_blocksize
= SHA512_BLOCK_SIZE
,
3942 /* Add AEAD Algorithms */
3944 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_GCM
,
3948 .cra_name
= "gcm(aes)",
3949 .cra_driver_name
= "gcm-aes-chcr",
3951 .cra_priority
= CHCR_AEAD_PRIORITY
,
3952 .cra_ctxsize
= sizeof(struct chcr_context
) +
3953 sizeof(struct chcr_aead_ctx
) +
3954 sizeof(struct chcr_gcm_ctx
),
3956 .ivsize
= GCM_AES_IV_SIZE
,
3957 .maxauthsize
= GHASH_DIGEST_SIZE
,
3958 .setkey
= chcr_gcm_setkey
,
3959 .setauthsize
= chcr_gcm_setauthsize
,
3963 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
,
3967 .cra_name
= "rfc4106(gcm(aes))",
3968 .cra_driver_name
= "rfc4106-gcm-aes-chcr",
3970 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
3971 .cra_ctxsize
= sizeof(struct chcr_context
) +
3972 sizeof(struct chcr_aead_ctx
) +
3973 sizeof(struct chcr_gcm_ctx
),
3976 .ivsize
= GCM_RFC4106_IV_SIZE
,
3977 .maxauthsize
= GHASH_DIGEST_SIZE
,
3978 .setkey
= chcr_gcm_setkey
,
3979 .setauthsize
= chcr_4106_4309_setauthsize
,
3983 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_CCM
,
3987 .cra_name
= "ccm(aes)",
3988 .cra_driver_name
= "ccm-aes-chcr",
3990 .cra_priority
= CHCR_AEAD_PRIORITY
,
3991 .cra_ctxsize
= sizeof(struct chcr_context
) +
3992 sizeof(struct chcr_aead_ctx
),
3995 .ivsize
= AES_BLOCK_SIZE
,
3996 .maxauthsize
= GHASH_DIGEST_SIZE
,
3997 .setkey
= chcr_aead_ccm_setkey
,
3998 .setauthsize
= chcr_ccm_setauthsize
,
4002 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
,
4006 .cra_name
= "rfc4309(ccm(aes))",
4007 .cra_driver_name
= "rfc4309-ccm-aes-chcr",
4009 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
4010 .cra_ctxsize
= sizeof(struct chcr_context
) +
4011 sizeof(struct chcr_aead_ctx
),
4015 .maxauthsize
= GHASH_DIGEST_SIZE
,
4016 .setkey
= chcr_aead_rfc4309_setkey
,
4017 .setauthsize
= chcr_4106_4309_setauthsize
,
4021 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4025 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
4027 "authenc-hmac-sha1-cbc-aes-chcr",
4028 .cra_blocksize
= AES_BLOCK_SIZE
,
4029 .cra_priority
= CHCR_AEAD_PRIORITY
,
4030 .cra_ctxsize
= sizeof(struct chcr_context
) +
4031 sizeof(struct chcr_aead_ctx
) +
4032 sizeof(struct chcr_authenc_ctx
),
4035 .ivsize
= AES_BLOCK_SIZE
,
4036 .maxauthsize
= SHA1_DIGEST_SIZE
,
4037 .setkey
= chcr_authenc_setkey
,
4038 .setauthsize
= chcr_authenc_setauthsize
,
4042 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4047 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
4049 "authenc-hmac-sha256-cbc-aes-chcr",
4050 .cra_blocksize
= AES_BLOCK_SIZE
,
4051 .cra_priority
= CHCR_AEAD_PRIORITY
,
4052 .cra_ctxsize
= sizeof(struct chcr_context
) +
4053 sizeof(struct chcr_aead_ctx
) +
4054 sizeof(struct chcr_authenc_ctx
),
4057 .ivsize
= AES_BLOCK_SIZE
,
4058 .maxauthsize
= SHA256_DIGEST_SIZE
,
4059 .setkey
= chcr_authenc_setkey
,
4060 .setauthsize
= chcr_authenc_setauthsize
,
4064 .type
= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
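/*
 * Usage sketch (illustrative only; "key" and "keylen" are placeholders):
 * once registered, any entry above is reachable through the generic
 * crypto API by its cra_name, with CHCR_AEAD_PRIORITY deciding whether
 * this driver is picked over the software implementation:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha224),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);	(rtattr-encoded authenc key)
 *	crypto_aead_setauthsize(tfm, SHA224_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 */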
/*
 *	chcr_unregister_alg - Deregister crypto algorithms with
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
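/*
 * For orientation: each driver_algs[] entry pairs a type word with a
 * union, so the loops above and below can dispatch on
 * (type & CRYPTO_ALG_TYPE_MASK) and touch the matching union member.
 * Roughly (see the actual chcr_alg_template definition earlier in this
 * file):
 *
 *	struct chcr_alg_template {
 *		u32 type;
 *		u32 is_registered;
 *		union {
 *			struct crypto_alg crypto;
 *			struct ahash_alg hash;
 *			struct aead_alg aead;
 *		} alg;
 *	};
 */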
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
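/*
 * SZ_AHASH_REQ_CTX is installed below as halg.statesize, i.e. the buffer
 * size a caller must supply to crypto_ahash_export(). A minimal sketch of
 * the round trip this enables (hypothetical consumer code; "tfm" and
 * "req" assumed already set up):
 *
 *	void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
 *
 *	crypto_ahash_export(req, state);	(snapshot the partial digest)
 *	...
 *	crypto_ahash_import(req, state);	(resume it later)
 */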
/*
 *	chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After this
 *	the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once when the last device goes down. After this
 *	the kernel will not call the driver API for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
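/*
 * Call-site sketch (simplified and hypothetical; the real hooks live in
 * chcr_core.c's ULD callbacks): a device add/remove path pairs the two
 * entry points so the algorithms are exposed exactly while at least one
 * device exists. "dev_count" here is a placeholder counter:
 *
 *	if (atomic_inc_return(&dev_count) == 1)
 *		start_crypto();
 *	...
 *	if (atomic_dec_and_test(&dev_count))
 *		stop_crypto();
 */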