/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
}
/*
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
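/*
 * Editor's illustration (not part of the driver): a flit is 8 bytes and
 * each SGL entry costs roughly 1.5 flits, plus fixed overhead. After the
 * n-- above, n = 8 entries works out as:
 *
 *	(3 * 7) / 2 + (7 & 1) + 2  =  10 + 1 + 2  =  13 flits
 */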
static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}
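/*
 * Editor's aside: crypto_memneq() is used above instead of memcmp() so the
 * tag check takes the same time whether the tags differ early or late. A
 * minimal sketch of the same pattern, assuming a 16-byte tag:
 *
 *	u8 expected[16], received[16];
 *	...
 *	if (crypto_memneq(expected, received, 16))
 *		err = -EBADMSG;		// tags differ; reject the message
 */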
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_req_ctx ctx_req;
	unsigned int digestsize, updated_digestsize;
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		ctx_req.req.aead_req = aead_request_cast(req);
		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
		if (ctx_req.ctx.reqctx->skb) {
			kfree_skb(ctx_req.ctx.reqctx->skb);
			ctx_req.ctx.reqctx->skb = NULL;
		}
		free_new_sg(ctx_req.ctx.reqctx->newdstsg);
		ctx_req.ctx.reqctx->newdstsg = NULL;
		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
			chcr_verify_tag(ctx_req.req.aead_req, input,
					&err);
			ctx_req.ctx.reqctx->verify = VERIFY_HW;
		}
		ctx_req.req.aead_req->base.complete(req, err);
		break;

	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
					      input, err);
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		ctx_req.req.ahash_req = ahash_request_cast(req);
		ctx_req.ctx.ahash_ctx =
			ahash_request_ctx(ctx_req.req.ahash_req);
		digestsize =
			crypto_ahash_digestsize(crypto_ahash_reqtfm(
							ctx_req.req.ahash_req));
		updated_digestsize = digestsize;
		if (digestsize == SHA224_DIGEST_SIZE)
			updated_digestsize = SHA256_DIGEST_SIZE;
		else if (digestsize == SHA384_DIGEST_SIZE)
			updated_digestsize = SHA512_DIGEST_SIZE;
		if (ctx_req.ctx.ahash_ctx->skb) {
			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
			ctx_req.ctx.ahash_ctx->skb = NULL;
		}
		if (ctx_req.ctx.ahash_ctx->result == 1) {
			ctx_req.ctx.ahash_ctx->result = 0;
			memcpy(ctx_req.req.ahash_req->result, input +
			       sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
			       sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		ctx_req.req.ahash_req->base.complete(req, err);
		break;
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}
/*
 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of flits needed for the given offload packet.
 *	These packets are already fully constructed and no additional headers
 *	will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8;   /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
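/*
 * Editor's illustration: for an immediate packet of skb->len = 100 bytes
 * the data travels inside the work request itself, so the cost is simply
 * DIV_ROUND_UP(100, 8) = 13 flits. Larger packets pay for their headers
 * plus an SGL sized by sgl_len() as above.
 */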
static inline void get_aes_decrypt_key(unsigned char *dec_key,
				       const unsigned char *key,
				       unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}
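/*
 * Editor's aside: this expands the AES key schedule and stores it starting
 * from the last round, the form the hardware wants for decryption. Callers
 * in this file pass the key length in *bits*, e.g.:
 *
 *	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
 */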
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}
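/*
 * Editor's illustration: the exported shash state keeps its words in CPU
 * order while the hardware expects big-endian words. On a little-endian
 * host the SHA-256 state word 0x6a09e667 sits in memory as 67 e6 09 6a
 * and becomes 6a 09 e6 67 after the swap above.
 */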
static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
			   struct scatterlist *sg,
			   struct phys_sge_parm *sg_param)
{
	struct phys_sge_pairs *to;
	unsigned int len = 0, left_size = sg_param->obsize;
	unsigned int nents = sg_param->nents, i, j = 0;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
				       sizeof(struct cpl_rx_phys_dsgl));
	for (i = 0; nents && left_size; to++) {
		for (j = 0; j < 8 && nents && left_size; j++, nents--) {
			len = min(left_size, sg_dma_len(sg));
			to->len[j] = htons(len);
			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
			left_size -= len;
			sg = sg_next(sg);
		}
	}
}
static inline int map_writesg_phys_cpl(struct device *dev,
					struct cpl_rx_phys_dsgl *phys_cpl,
					struct scatterlist *sg,
					struct phys_sge_parm *sg_param)
{
	if (!sg || !sg_param->nents)
		return -EINVAL;

	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
	if (sg_param->nents == 0) {
		pr_err("CHCR : DMA mapping failed\n");
		return -EINVAL;
	}
	write_phys_cpl(phys_cpl, sg, sg_param);
	return 0;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
static inline void write_buffer_to_skb(struct sk_buff *skb,
					unsigned int *frags,
					char *bfr,
					u8 bfr_len)
{
	skb->len += bfr_len;
	skb->data_len += bfr_len;
	skb->truesize += bfr_len;
	get_page(virt_to_page(bfr));
	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
			   offset_in_page(bfr), bfr_len);
	(*frags)++;
}
static inline void
write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
		struct scatterlist *sg, unsigned int count)
{
	struct page *spage;
	unsigned int page_len;

	skb->len += count;
	skb->data_len += count;
	skb->truesize += count;

	while (count > 0) {
		if (!sg || (!(sg->length)))
			break;
		spage = sg_page(sg);
		get_page(spage);
		page_len = min(sg->length, count);
		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
		(*frags)++;
		count -= page_len;
		sg = sg_next(sg);
	}
}
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     short int *sent,
			     short int *dent)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = 0;

	*sent = 0;
	*dent = 0;
	while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		srclen += src->length;
		srcsg++;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			dstlen += dst->length;
			dst = sg_next(dst);
			dstsg++;
		}
		src = sg_next(src);
	}
	*sent = srcsg - minsg;
	*dent = dstsg;
	return min(srclen, dstlen);
}
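/*
 * Editor's aside: the function walks src and dst together and stops as soon
 * as either list would no longer fit in the work request, so the returned
 * min(srclen, dstlen) is the largest byte count that is safe for both the
 * gather and scatter sides. Callers then round it and clamp it to the
 * request size, as in process_cipher() below.
 */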
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				unsigned int flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
	skcipher_request_set_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
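/*
 * Editor's usage sketch: this is the software fallback path taken when a
 * request cannot be mapped onto the hardware (for example bytes == 0 in
 * chcr_handle_cipher_resp() below):
 *
 *	err = chcr_cipher_fallback(ablkctx->sw_cipher, req->base.flags,
 *				   req->src, req->dst, req->nbytes,
 *				   req->info, op_type);
 */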
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       void *req, struct sk_buff *skb,
			       int kctx_len, int hash_sz,
			       int is_iv,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int iv_loc = IV_DSGL;
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
	unsigned int immdatalen = 0, nr_frags = 0;

	if (is_ofld_imm(skb)) {
		immdatalen = skb->data_len;
		iv_loc = IV_IMMEDIATE;
	} else {
		nr_frags = skb_shinfo(skb)->nr_frags;
	}

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
				    (calc_tx_flits_ofld(skb) * 8), 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
				is_iv ? iv_loc : IV_NOP, !!lcb,
				ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
					16) - ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) +
					   kctx_len + sc_len + immdatalen);
}
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@req: cipher req.
 *	@ctx: crypto driver context of the request.
 *	@qid: ingress qid where response of this WR should be received.
 *	@op_type: encryption or decryption
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct chcr_blkcipher_req_ctx *reqctx =
		ablkcipher_request_ctx(wrparam->req);
	struct phys_sge_parm sg_param;
	unsigned int frags = 0, transhdr_len, phys_dsgl;
	int error;
	unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);

	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);

	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, ivsize >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							  0, 1, phys_dsgl);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = wrparam->bytes;
	sg_param.qid = wrparam->qid;
	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
				     reqctx->dst, &sg_param);
	if (error)
		goto map_fail1;

	skb_set_transport_header(skb, transhdr_len);
	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
	write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;
	skb_get(skb);
	return skb;
map_fail1:
	kfree_skb(skb);
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}
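/*
 * Editor's illustration: the mapping is simply from AES key length to the
 * key-context size code the firmware expects, e.g. a 16-byte (AES-128)
 * key yields CHCR_KEYCTX_CIPHER_KEY_SIZE_128; any other length yields 0,
 * which callers treat as "unsupported".
 */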
static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	int err = 0;

	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}
static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
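/*
 * Editor's illustration: the IV is treated as a 128-bit big-endian counter
 * and `add` is folded in 32 bits at a time, from the least significant
 * word upward. If the low word is 0xffffffff and add = 2, the low word
 * becomes 0x00000001, prev < c is false, and the loop carries 1 into the
 * next word; without overflow the loop breaks after the first word.
 */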
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
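/*
 * Editor's illustration: ~temp + 1 is the number of increments left before
 * the low 32-bit counter word wraps. If the low word is 0xfffffffe, then
 * c = 2, so at most 2 * AES_BLOCK_SIZE = 32 bytes are processed in this
 * pass; the remainder is handled after the IV is recomputed.
 */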
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_cipher *cipher;
	int ret, i;
	u8 *key;
	unsigned int keylen;

	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	memcpy(iv, req->info, AES_BLOCK_SIZE);

	if (IS_ERR(cipher)) {
		ret = -ENOMEM;
		goto out;
	}
	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret)
		goto out1;

	crypto_cipher_encrypt_one(cipher, iv, iv);
	for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	crypto_cipher_decrypt_one(cipher, iv, iv);
out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
static int chcr_update_cipher_iv(struct ablkcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
					   AES_BLOCK_SIZE,
					   reqctx->processed - AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant for subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
					   AES_BLOCK_SIZE,
					   reqctx->processed - AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	int bytes;

	dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
		     DMA_FROM_DEVICE);

	if (reqctx->skb) {
		kfree_skb(reqctx->skb);
		reqctx->skb = NULL;
	}
	if (err)
		goto complete;

	if (req->nbytes == reqctx->processed) {
		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
		goto complete;
	}

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_qidx))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			goto complete;
		}
	}
	wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
					 reqctx->processed);
	reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
				       reqctx->processed);
	if (!wrparam.srcsg || !reqctx->dst) {
		pr_err("Input sg list length less than nbytes\n");
		err = -EINVAL;
		goto complete;
	}
	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
				  SPACE_LEFT(ablkctx->enckey_len),
				  &wrparam.snent, &reqctx->dst_nents);
	if ((bytes + reqctx->processed) >= req->nbytes)
		bytes = req->nbytes - reqctx->processed;
	else
		bytes = ROUND_16(bytes);
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto complete;

	if (unlikely(bytes == 0)) {
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   wrparam.srcsg,
					   reqctx->dst,
					   req->nbytes - reqctx->processed,
					   reqctx->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	reqctx->processed += bytes;
	wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto complete;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
	chcr_send_wr(skb);
	return 0;
complete:
	free_new_sg(reqctx->newdstsg);
	reqctx->newdstsg = NULL;
	req->base.complete(&req->base, err);
	return err;
}
static int process_cipher(struct ablkcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct cipher_wr_param wrparam;
	int bytes, nents, err = -EINVAL;

	reqctx->newdstsg = NULL;
	reqctx->processed = 0;

	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes == 0) ||
	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		goto error;
	}
	wrparam.srcsg = req->src;
	if (is_newsg(req->dst, &nents)) {
		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
		if (IS_ERR(reqctx->newdstsg))
			return PTR_ERR(reqctx->newdstsg);
		reqctx->dstsg = reqctx->newdstsg;
	} else {
		reqctx->dstsg = req->dst;
	}
	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
				  SPACE_LEFT(ablkctx->enckey_len),
				  &wrparam.snent,
				  &reqctx->dst_nents);
	if ((bytes + reqctx->processed) >= req->nbytes)
		bytes = req->nbytes - reqctx->processed;
	else
		bytes = ROUND_16(bytes);
	if (unlikely(bytes > req->nbytes))
		bytes = req->nbytes;
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->info, bytes);
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(reqctx->iv, req->info, ivsize);
	}
	if (unlikely(bytes == 0)) {
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   req->info,
					   op_type);
		goto error;
	}
	reqctx->processed = bytes;
	reqctx->dst = reqctx->dstsg;
	reqctx->op = op_type;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto error;
	}

	return 0;
error:
	free_new_sg(reqctx->newdstsg);
	reqctx->newdstsg = NULL;
	return err;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_qidx))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
			     CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb = NULL;
	int err;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_qidx))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
			     CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	struct adapter *adap;
	unsigned int id;
	int txq_perchan, txq_idx, ntxq;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = u_ctx->dev;
		adap = padap(ctx->dev);
		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
				    adap->vres.ncrypto_fc);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
		txq_idx += id % txq_perchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		ctx->dev->rx_channel_id = 0;
		spin_unlock(&ctx->dev->lock_chcr_dev);
	}
out:
	return err;
}
static int chcr_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
	 * cannot be used as fallback in chcr_handle_cipher_response
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static void chcr_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}
static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 *	create_hash_wr - Create hash work request
 *	@req - Cipher req base
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int kctx_len = 0;
	u8 hash_size_in_response = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);

	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
	kctx_len = param->alg_prm.result_size + iopad_alignment;
	if (param->opad_needed)
		kctx_len += param->alg_prm.result_size + iopad_alignment;

	if (req_ctx->result)
		hash_size_in_response = digestsize;
	else
		hash_size_in_response = param->alg_prm.result_size;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		return skb;

	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);

	skb_set_transport_header(skb, transhdr_len);
	if (param->bfr_len != 0)
		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
				    param->bfr_len);
	if (param->sg_len != 0)
		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
		    hash_size_in_response, 0, DUMMY_BYTES, 0);
	req_ctx->skb = skb;
	skb_get(skb);
	return skb;
}
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	u8 *temp;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_qidx))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}

	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.sg_len = nbytes - req_ctx->reqlen;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 0;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;

	if (remainder) {
		/* Swap buffers */
		temp = req_ctx->reqbfr;
		req_ctx->reqbfr = req_ctx->skbfr;
		req_ctx->skbfr = temp;
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	skb = create_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(ctx);

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_qidx))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.sg_len = req->nbytes;
	params.bfr_len = req_ctx->reqlen;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->result = 1;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}

	skb = create_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_qidx))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.last = 0;
	params.more = 0;
	params.sg_len = req->nbytes;
	params.bfr_len = 0;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* use the key to calculate the ipad and opad. ipad will be sent with
	 * the first request's data. opad will be sent with the final hash
	 * result. ipad lives in hmacctx->ipad and opad in hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	ablkctx->key_ctx_hdr =
		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				 CHCR_KEYCTX_NO_KEY, 1,
				 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	req_ctx->skb = NULL;
	req_ctx->result = 0;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);
	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}
static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
{
	int nents = 0;
	int ret = 0;

	while (sgl) {
		if (sgl->length > CHCR_SG_SIZE)
			ret = 1;
		nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
		sgl = sg_next(sgl);
	}
	*newents = nents;
	return ret;
}

static inline void free_new_sg(struct scatterlist *sgl)
{
	kfree(sgl);
}
static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
					unsigned int nents)
{
	struct scatterlist *newsg, *sg;
	int i, len, processed = 0;
	struct page *spage;
	int offset;

	newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
	if (!newsg)
		return ERR_PTR(-ENOMEM);
	sg = newsg;
	sg_init_table(sg, nents);
	offset = sgl->offset;
	spage = sg_page(sgl);
	for (i = 0; i < nents; i++) {
		len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
		sg_set_page(sg, spage, len, offset);
		processed += len;
		offset += len;
		if (offset >= PAGE_SIZE) {
			offset = offset % PAGE_SIZE;
			spage++;
		}
		if (processed == sgl->length) {
			processed = 0;
			sgl = sg_next(sgl);
			if (!sgl)
				break;
			spage = sg_page(sgl);
			offset = sgl->offset;
		}
		sg = sg_next(sg);
	}
	return newsg;
}
static int chcr_copy_assoc(struct aead_request *req,
			   struct chcr_aead_ctx *ctx)
{
	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);

	skcipher_request_set_tfm(skreq, ctx->null);
	skcipher_request_set_callback(skreq, aead_request_flags(req),
				      NULL, NULL);
	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
				   NULL);

	return crypto_skcipher_encrypt(skreq);
}

static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
				   int aadmax, int wrlen,
				   unsigned short op_type)
{
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
	    (req->assoclen > aadmax) ||
	    (src_nent > MAX_SKB_FRAGS) ||
	    (wrlen > MAX_WR_SIZE))
		return 1;
	return 0;
}
static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
		crypto_aead_encrypt(subreq);
}
static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size,
					 unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct phys_sge_parm sg_param;
	struct scatterlist *src;
	unsigned int frags = 0, transhdr_len;
	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
	unsigned int kctx_len = 0, nents;
	unsigned short stop_offset = 0;
	unsigned int assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL, src_nent;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);

	reqctx->newdstsg = NULL;
	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
						    authsize);
	if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
		goto err;

	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
		goto err;
	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
	if (src_nent < 0)
		goto err;
	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);

	if (req->src != req->dst) {
		error = chcr_copy_assoc(req, aeadctx);
		if (error)
			return ERR_PTR(error);
	}
	if (dst_size && is_newsg(req->dst, &nents)) {
		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
		if (IS_ERR(reqctx->newdstsg))
			return ERR_CAST(reqctx->newdstsg);
		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
					       reqctx->newdstsg, req->assoclen);
	} else {
		if (req->src == req->dst)
			reqctx->dst = src;
		else
			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
						       req->dst, req->assoclen);
	}
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
		null = 1;
		assoclen = 0;
	}
	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents < 0) {
		pr_err("AUTHENC:Invalid Destination sg entries\n");
		error = -EINVAL;
		goto err;
	}
	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
				    T6_MAX_AAD_SIZE,
				    transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
				    op_type)) {
		atomic_inc(&adap->chcr_stats.fallback);
		free_new_sg(reqctx->newdstsg);
		reqctx->newdstsg = NULL;
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	/* LLD is going to write the sge hdr. */
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));

	chcr_req = __skb_put_zero(skb, transhdr_len);

	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * Input order is AAD, IV and payload; the IV is counted as part of
	 * the authenticated data. All other fields are filled according to
	 * the hardware spec.
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
				       (ivsize ? (assoclen + 1) : 0));
	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + ivsize + 1,
					(stop_offset & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					stop_offset & 0xF,
					null ? 0 : assoclen + ivsize + 1,
					stop_offset, stop_offset);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_CBC,
					actx->auth_mode, aeadctx->hmac_ctrl,
					ivsize >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (op_type == CHCR_ENCRYPT_OP)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
					4), actx->h_iopad, kctx_len -
				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
	sg_param.qid = qid;
	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
				     reqctx->dst, &sg_param);
	if (error)
		goto dstmap_fail;

	skb_set_transport_header(skb, transhdr_len);

	if (assoclen) {
		/* AAD buffer follows */
		write_sg_to_skb(skb, &frags, req->src, assoclen);
	}
	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
	write_sg_to_skb(skb, &frags, src, req->cryptlen);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
		    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
	reqctx->skb = skb;
	skb_get(skb);

	return skb;
dstmap_fail:
	/* ivmap_fail: */
	kfree_skb(skb);
err:
	free_new_sg(reqctx->newdstsg);
	reqctx->newdstsg = NULL;
	return ERR_PTR(error);
}
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
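/*
 * generate_b0 - Build the 16-byte CCM B0 block (RFC 3610) in the
 * request scratch pad:  flags(1) | nonce(15 - L) | l(msg)(L),
 * where the flags byte carries L' = L - 1 from the IV, the encoded
 * MAC length m, and the "adata" bit when AAD is present.
 */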
static void generate_b0(struct aead_request *req,
			struct chcr_aead_ctx *aeadctx,
			unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, reqctx->iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);
}
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}
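/*
 * ccm_format_packet - Lay out the per-request CCM inputs: the 16-byte
 * counter IV (for RFC4309 built as salt | IV | 0), the 2-byte AAD
 * length field placed after B0 in the scratch pad, and B0 itself via
 * generate_b0().
 */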
static int ccm_format_packet(struct aead_request *req,
			     struct chcr_aead_ctx *aeadctx,
			     unsigned int sub_type,
			     unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		reqctx->iv[0] = 3;
		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
		memcpy(reqctx->iv + 4, req->iv, 8);
		memset(reqctx->iv + 12, 0, 4);
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
				htons(req->assoclen - 8);
	} else {
		memcpy(reqctx->iv, req->iv, 16);
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
				htons(req->assoclen);
	}
	generate_b0(req, aeadctx, op_type);
	/* zero the ctr value */
	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
	return rc;
}
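/*
 * fill_sec_cpl_for_aead - Fill the CPL_TX_SEC_PDU for CCM requests.
 * All offsets are shifted by ccm_xtra, the bytes the driver prepends
 * in front of the AAD: the B0 block and, when AAD is present, the
 * 2-byte AAD length field.
 */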
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type,
				  struct chcr_context *chcrctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
	unsigned int ivsize = AES_BLOCK_SIZE;
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int c_id = chcrctx->dev->rx_channel_id;
	unsigned int ccm_xtra;
	unsigned char tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(assoclen + ivsize + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
					2, (ivsize ? (assoclen + 1) : 0) +
					ccm_xtra);
	sec_cpl->pldlen =
		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
	/* For CCM there will always be a B0 block, so AAD start is always 1 */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					1, assoclen + ccm_xtra, assoclen
					+ ivsize + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, ivsize >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					1, dst_size);
}
int aead_ccm_validate_input(unsigned short op_type,
			    struct aead_request *req,
			    struct chcr_aead_ctx *aeadctx,
			    unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	if (aeadctx->enckey_len == 0) {
		pr_err("CCM: Encryption key not set\n");
		return -EINVAL;
	}
	return 0;
}
unsigned int fill_aead_req_fields(struct sk_buff *skb,
				  struct aead_request *req,
				  struct scatterlist *src,
				  unsigned int ivsize,
				  struct chcr_aead_ctx *aeadctx)
{
	unsigned int frags = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	/* b0 and aad length(if available) */
	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
				(req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
	if (req->assoclen) {
		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
			write_sg_to_skb(skb, &frags, req->src,
					req->assoclen - 8);
		else
			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
	}
	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
	write_sg_to_skb(skb, &frags, src, req->cryptlen);

	return frags;
}
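/*
 * create_aead_ccm_wr - Build the work request for ccm(aes) and
 * rfc4309(ccm(aes)): validate the request, format B0 and the counter
 * IV, duplicate the AES key into both halves of the key context and
 * attach the destination DSGL and gather list.
 */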
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size,
					  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct phys_sge_parm sg_param;
	struct scatterlist *src;
	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
	unsigned int dst_size = 0, kctx_len, nents;
	unsigned int sub_type;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL, src_nent;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);

	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
						    authsize);
	reqctx->newdstsg = NULL;
	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
		goto err;
	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
	if (src_nent < 0)
		goto err;

	sub_type = get_aead_subtype(tfm);
	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
	if (req->src != req->dst) {
		error = chcr_copy_assoc(req, aeadctx);
		if (error) {
			pr_err("AAD copy to destination buffer fails\n");
			return ERR_PTR(error);
		}
	}
	if (dst_size && is_newsg(req->dst, &nents)) {
		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
		if (IS_ERR(reqctx->newdstsg))
			return ERR_CAST(reqctx->newdstsg);
		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
					       reqctx->newdstsg, req->assoclen);
	} else {
		if (req->src == req->dst)
			reqctx->dst = src;
		else
			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
						       req->dst, req->assoclen);
	}
	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents < 0) {
		pr_err("CCM:Invalid Destination sg entries\n");
		error = -EINVAL;
		goto err;
	}
	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
	if (error)
		goto err;

	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
				    T6_MAX_AAD_SIZE - 18,
				    transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
				    op_type)) {
		atomic_inc(&adap->chcr_stats.fallback);
		free_new_sg(reqctx->newdstsg);
		reqctx->newdstsg = NULL;
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}

	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	skb_reserve(skb, sizeof(struct sge_opaque_hdr));

	chcr_req = __skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
					16), aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
	if (error)
		goto dstmap_fail;

	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
	sg_param.qid = qid;
	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
				     reqctx->dst, &sg_param);
	if (error)
		goto dstmap_fail;

	skb_set_transport_header(skb, transhdr_len);
	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
		    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
	reqctx->skb = skb;
	skb_get(skb);
	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	free_new_sg(reqctx->newdstsg);
	reqctx->newdstsg = NULL;
	return ERR_PTR(error);
}
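/*
 * create_gcm_wr - Build the work request for gcm(aes) and
 * rfc4106(gcm(aes)): the key context carries the AES key followed by
 * the GHASH subkey H, and the 16-byte IV is formed as
 * salt | IV | 0x00000001.
 */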
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size,
				     unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct phys_sge_parm sg_param;
	struct scatterlist *src;
	unsigned int frags = 0, transhdr_len;
	unsigned int ivsize = AES_BLOCK_SIZE;
	unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
	unsigned char tag_offset = 0;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL, src_nent;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);

	reqctx->newdstsg = NULL;
	dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
					       authsize);
	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;

	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
		goto err;
	src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
	if (src_nent < 0)
		goto err;

	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
	if (req->src != req->dst) {
		error = chcr_copy_assoc(req, aeadctx);
		if (error)
			return ERR_PTR(error);
	}

	if (dst_size && is_newsg(req->dst, &nents)) {
		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
		if (IS_ERR(reqctx->newdstsg))
			return ERR_CAST(reqctx->newdstsg);
		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
					       reqctx->newdstsg, assoclen);
	} else {
		if (req->src == req->dst)
			reqctx->dst = src;
		else
			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
						       req->dst, assoclen);
	}

	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents < 0) {
		pr_err("GCM:Invalid Destination sg entries\n");
		error = -EINVAL;
		goto err;
	}

	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
		AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
				    T6_MAX_AAD_SIZE,
				    transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
				    op_type)) {
		atomic_inc(&adap->chcr_stats.fallback);
		free_new_sg(reqctx->newdstsg);
		reqctx->newdstsg = NULL;
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	/* NIC driver is going to write the sge hdr. */
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));

	chcr_req = __skb_put_zero(skb, transhdr_len);

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					ctx->dev->rx_channel_id, 2, (ivsize ?
					(assoclen + 1) : 0));
	chcr_req->sec_cpl.pldlen =
		htonl(assoclen + ivsize + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + ivsize + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
						tag_offset, tag_offset);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, ivsize >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 1, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	/* prepare a 16 byte iv */
	/* S   A   L  T |  IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(reqctx->iv, aeadctx->salt, 4);
		memcpy(reqctx->iv + 4, req->iv, 8);
	} else {
		memcpy(reqctx->iv, req->iv, 12);
	}
	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
	sg_param.qid = qid;
	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
				     reqctx->dst, &sg_param);
	if (error)
		goto dstmap_fail;

	skb_set_transport_header(skb, transhdr_len);
	write_sg_to_skb(skb, &frags, req->src, assoclen);
	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
	write_sg_to_skb(skb, &frags, src, req->cryptlen);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
		    sizeof(struct cpl_rx_phys_dsgl) + dst_size,
		    reqctx->verify);
	reqctx->skb = skb;
	skb_get(skb);
	return skb;

dstmap_fail:
	/* ivmap_fail: */
	kfree_skb(skb);
err:
	free_new_sg(reqctx->newdstsg);
	reqctx->newdstsg = NULL;
	return ERR_PTR(error);
}
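/*
 * chcr_aead_cra_init - Per-transform setup: allocate a software
 * fallback for the same algorithm (used for requests the hardware
 * cannot handle), size the request context to fit either path, and
 * grab the default null skcipher used for copying AAD.
 */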
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				 sizeof(struct aead_request) +
				 crypto_aead_reqsize(aeadctx->sw_cipher)));
	aeadctx->null = crypto_get_default_null_skcipher();
	if (IS_ERR(aeadctx->null))
		return PTR_ERR(aeadctx->null);
	return chcr_device_init(ctx);
}
*tfm
)
2590 struct chcr_context
*ctx
= crypto_aead_ctx(tfm
);
2591 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(ctx
);
2593 crypto_put_default_null_skcipher();
2594 crypto_free_aead(aeadctx
->sw_cipher
);
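/*
 * The setauthsize handlers below map the requested ICV length onto
 * the hardware HMAC_CTRL truncation modes; lengths the hardware
 * cannot produce directly are marked VERIFY_SW so the tag is checked
 * in software on completion.
 */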
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
	 * does not hold for SHA1, so the authsize == 12 check must come
	 * before the authsize == (maxauth >> 1) check.
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
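/*
 * GCM allows ICV lengths of 4, 8 and 12..16 bytes; 13 and 15 are not
 * generated natively by the hardware, so those lengths are verified
 * in software on decrypt.
 */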
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
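/*
 * chcr_ccm_common_setkey - Program the CCM key context header and
 * cache the raw AES key; create_aead_ccm_wr() later copies the key
 * into both halves of the key context.
 */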
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) +
		((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
	if (keylen == AES_KEYSIZE_128) {
		mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}
static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	int error;

	if (keylen < 3) {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
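/*
 * chcr_gcm_setkey - Program the GCM key context and derive the GHASH
 * subkey H = AES-K(0^128) with a temporary aes-generic cipher; for
 * rfc4106 the trailing 4 key bytes are kept back as the nonce salt.
 */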
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	struct crypto_cipher *cipher;
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		((DIV_ROUND_UP(keylen, 16)) << 4) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		aeadctx->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out1;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);

out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
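/*
 * chcr_authenc_setkey - Split the authenc key into cipher and auth
 * parts, derive the AES decrypt round keys, and precompute the HMAC
 * ipad/opad partial hashes that are loaded into the key context.
 */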
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(authenc);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains auth and cipher key both*/
	struct crypto_authenc_keys keys;
	unsigned int bs;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use authkey to generate h(ipad)
	 * and h(opad), so authkey is not needed again. authkeylen is the
	 * size of the hash digest.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
			    aeadctx->enckey_len << 3);

	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);
		shash->tfm = base_hash;
		shash->flags = crypto_shash_get_flags(base_hash);
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest*/
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digest to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
						0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(authenc);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct crypto_authenc_keys keys;
	int err;
	/* it contains auth and cipher key both*/
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
			    aeadctx->enckey_len << 3);
	key_ctx_len = sizeof(struct _key_ctx)
		+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	return 0;
out:
	aeadctx->enckey_len = 0;
	return -EINVAL;
}
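/*
 * Request entry points: pick the WR builder that matches the AEAD
 * subtype. On decrypt, when the ICV length forced VERIFY_SW, room is
 * reserved for the full digest so the tag can be checked in software
 * on completion.
 */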
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_gcm_wr);
	}
}
static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_gcm_wr);
	}
}
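/*
 * chcr_aead_op - Common submission path: check the device and queue
 * state, build the work request via the subtype-specific builder and
 * post it to the SGE; the request completes asynchronously, so
 * -EINPROGRESS is returned on success.
 */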
static int chcr_aead_op(struct aead_request *req,
			unsigned short op_type,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;

	if (!ctx->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(ctx);
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   ctx->tx_qidx)) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
			   op_type);

	if (IS_ERR(skb) || !skb)
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
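/*
 * Template table of every cipher, hash and AEAD algorithm the driver
 * exposes. Common fields (module owner, flags, operations) are filled
 * in at registration time by chcr_register_alg().
 */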
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "cbc(aes)",
			.cra_driver_name	= "cbc-aes-chcr",
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_init		= chcr_cra_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey		= chcr_aes_cbc_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "xts(aes)",
			.cra_driver_name	= "xts-aes-chcr",
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_init		= chcr_cra_init,
			.cra_exit		= NULL,
			.cra_u.ablkcipher	= {
				.min_keysize	= 2 * AES_MIN_KEY_SIZE,
				.max_keysize	= 2 * AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey		= chcr_aes_xts_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "ctr(aes)",
			.cra_driver_name	= "ctr-aes-chcr",
			.cra_blocksize		= 1,
			.cra_init		= chcr_cra_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey		= chcr_aes_ctr_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "rfc3686(ctr(aes))",
			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
			.cra_blocksize		= 1,
			.cra_init		= chcr_rfc3686_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.ivsize		= CTR_RFC3686_IV_SIZE,
				.setkey		= chcr_aes_rfc3686_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = 12,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
/*
 *	chcr_unregister_alg - Deregister crypto algorithms with
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
/*
 *	chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;
			a_hash->halg.base.cra_type = &crypto_ahash_type;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else
			driver_algs[i].is_registered = 1;
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After this,
 *	the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once when the last device goes down. After this,
 *	the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}