/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
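/*
 * Worked example: for n = 3 entries, n-- leaves 2, giving
 * 3 * 2 / 2 + (2 & 1) + 2 = 5 flits, i.e. 40 bytes of descriptor
 * space (a flit is 8 bytes).
 */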
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * chcr_handle_resp - handle the completion of a request: copy back the
 * final digest or intermediate state and unmap the DMA buffers.
 * @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int error_status)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_req_ctx ctx_req;
	struct cpl_fw6_pld *fw6_pld;
	unsigned int digestsize, updated_digestsize;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
		ctx_req.ctx.ablk_ctx =
			ablkcipher_request_ctx(ctx_req.req.ablk_req);
		if (!error_status) {
			fw6_pld = (struct cpl_fw6_pld *)input;
			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
			       AES_BLOCK_SIZE);
		}
		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
			     ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
		if (ctx_req.ctx.ablk_ctx->skb) {
			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
			ctx_req.ctx.ablk_ctx->skb = NULL;
		}
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		ctx_req.req.ahash_req = (struct ahash_request *)req;
		ctx_req.ctx.ahash_ctx =
			ahash_request_ctx(ctx_req.req.ahash_req);
		digestsize =
			crypto_ahash_digestsize(crypto_ahash_reqtfm(
							ctx_req.req.ahash_req));
		updated_digestsize = digestsize;
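		/*
		 * SHA-224 and SHA-384 are truncated forms of SHA-256 and
		 * SHA-512, so the intermediate state carried between
		 * requests is the full parent digest size.
		 */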
		if (digestsize == SHA224_DIGEST_SIZE)
			updated_digestsize = SHA256_DIGEST_SIZE;
		else if (digestsize == SHA384_DIGEST_SIZE)
			updated_digestsize = SHA512_DIGEST_SIZE;
		if (ctx_req.ctx.ahash_ctx->skb) {
			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
			ctx_req.ctx.ahash_ctx->skb = NULL;
		}
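		/* A completed digest goes back to the caller; otherwise
		 * save the intermediate state for the next partial request.
		 */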
		if (ctx_req.ctx.ahash_ctx->result == 1) {
			ctx_req.ctx.ahash_ctx->result = 0;
			memcpy(ctx_req.req.ahash_req->result, input +
			       sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
			       sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		break;
	}
	return 0;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

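/*
 * get_aes_decrypt_key - derive the decryption ("reverse round") key.
 * Runs the FIPS-197 forward key schedule in w_ring and then copies out
 * the last nk round-key words, newest first; the setkey handlers store
 * the result in ablkctx->rrkey for the hardware's decrypt path.
 */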
static inline void get_aes_decrypt_key(unsigned char *dec_key,
				       const unsigned char *key,
				       unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	/* Default to an error pointer, not NULL, so that callers checking
	 * IS_ERR() actually see a failure for an unsupported digest size.
	 */
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}

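/*
 * Hash a single ipad/opad block with the software shash and export the
 * raw internal state; this precomputed partial hash is what gets loaded
 * into the hardware key context for HMAC.
 */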
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

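/* Swap the exported hash state into the big-endian word order the
 * hardware key context uses: 64-bit words for SHA-384/512, 32-bit
 * words for the smaller digests.
 */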
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}

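/*
 * write_phys_cpl - build the CPL_RX_PHYS_DSGL that tells the hardware
 * where to DMA the response payload: eight address/length pairs per
 * phys_sge_pairs entry, with any shortfall against obsize folded into
 * the final entry's length.
 */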
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
			   struct scatterlist *sg,
			   struct phys_sge_parm *sg_param)
{
	struct phys_sge_pairs *to;
	int out_buf_size = sg_param->obsize;
	unsigned int nents = sg_param->nents, i, j = 0;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
				       sizeof(struct cpl_rx_phys_dsgl));

	for (i = 0; nents; to++) {
		for (j = 0; j < 8 && nents; j++, nents--) {
			out_buf_size -= sg_dma_len(sg);
			to->len[j] = htons(sg_dma_len(sg));
			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
			sg = sg_next(sg);
		}
	}
	if (out_buf_size) {
		j--;
		to--;
		to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
	}
}

static inline int map_writesg_phys_cpl(struct device *dev,
				       struct cpl_rx_phys_dsgl *phys_cpl,
				       struct scatterlist *sg,
				       struct phys_sge_parm *sg_param)
{
	if (!sg || !sg_param->nents)
		return 0;

	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
	if (sg_param->nents == 0) {
		pr_err("CHCR : DMA mapping failed\n");
		return -EINVAL;
	}
	write_phys_cpl(phys_cpl, sg, sg_param);
	return 0;
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static inline void write_buffer_to_skb(struct sk_buff *skb,
				       unsigned int *frags,
				       char *bfr,
				       u8 bfr_len)
{
	skb->len += bfr_len;
	skb->data_len += bfr_len;
	skb->truesize += bfr_len;
	get_page(virt_to_page(bfr));
	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
			   offset_in_page(bfr), bfr_len);
	(*frags)++;
}

static inline void
write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
		struct scatterlist *sg, unsigned int count)
{
	struct page *spage;
	unsigned int page_len;

	skb->len += count;
	skb->data_len += count;
	skb->truesize += count;

	while (count > 0) {
		if (!sg || (!(sg->length)))
			break;
		spage = sg_page(sg);
		get_page(spage);
		page_len = min(sg->length, count);
		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
		(*frags)++;
		count -= page_len;
		sg = sg_next(sg);
	}
}

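/*
 * Lay out the key context for decryption: CBC needs only the
 * reverse-round key, while XTS keeps its second key half unchanged and
 * pairs it with the reverse-round form of the first half.
 */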
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

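/*
 * create_wreq - fill the FW_CRYPTO_LOOKASIDE_WR header fields common to
 * cipher and hash requests: sizes in 16-byte units, whether the payload
 * rides as immediate data or an SGL, and the rx queue to respond on.
 */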
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       void *req, struct sk_buff *skb,
			       int kctx_len, int hash_sz,
			       unsigned int phys_dsgl)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int iv_loc = IV_DSGL;
	int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
	unsigned int immdatalen = 0, nr_frags = 0;

	if (is_ofld_imm(skb)) {
		immdatalen = skb->data_len;
		iv_loc = IV_IMMEDIATE;
	} else {
		nr_frags = skb_shinfo(skb)->nr_frags;
	}

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
				    (calc_tx_flits_ofld(skb) * 8), 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
				(hash_sz) ? IV_NOP : iv_loc);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
					16) - ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) +
					   kctx_len +
					   ((hash_sz) ? DUMMY_BYTES :
					   (sizeof(struct cpl_rx_phys_dsgl) +
					   phys_dsgl)) + immdatalen);
}

/**
 * create_cipher_wr - form the WR for cipher operations
 * @req: cipher req.
 * @qid: ingress qid where response of this WR should be received.
 * @op_type: encryption or decryption
 */
static struct sk_buff
*create_cipher_wr(struct ablkcipher_request *req,
		  unsigned short qid,
		  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct phys_sge_parm sg_param;
	unsigned int frags = 0, transhdr_len, phys_dsgl;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
			GFP_ATOMIC;

	if (!req->info)
		return ERR_PTR(-EINVAL);
	reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	if (reqctx->dst_nents <= 0) {
		pr_err("AES: Invalid destination sg list\n");
		return ERR_PTR(-EINVAL);
	}
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		return ERR_PTR(-EINVAL);
	}

	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);

	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
							 ablkctx->ciph_mode,
							 0, 0, ivsize >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							  0, 1, phys_dsgl);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if (op_type == CHCR_DECRYPT_OP) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->nbytes;
	sg_param.qid = qid;
	sg_param.align = 1;
	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
				 &sg_param))
		goto map_fail1;

	skb_set_transport_header(skb, transhdr_len);
	memcpy(reqctx->iv, req->info, ivsize);
	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, phys_dsgl);
	reqctx->skb = skb;
	skb_get(skb);
	return skb;
map_fail1:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	unsigned int ck_size, context_size;
	u16 alignment = 0;

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		alignment = 8;
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		goto badkey_err;
	}
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;
	return -EINVAL;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	int ret = 0;
	struct sge_ofld_txq *q;
	struct adapter *adap = netdev2adap(dev);

	local_bh_disable();
	q = &adap->sge.ofldtxq[idx];
	spin_lock(&q->sendq.lock);
	if (q->full)
		ret = -1;
	spin_unlock(&q->sendq.lock);
	local_bh_enable();
	return ret;
}

static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
			       CHCR_ENCRYPT_OP);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		return PTR_ERR(skb);
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
			       CHCR_DECRYPT_OP);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		return PTR_ERR(skb);
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

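/*
 * Bind the tfm to a device on first use and choose its rx queue,
 * spreading contexts across the channel's queues by current CPU id.
 */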
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx;
	unsigned int id;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		err = assign_chcr_device(&ctx->dev);
		if (err) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		u_ctx = ULD_CTX(ctx);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		ctx->dev->tx_channel_id = 0;
		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_channel_id = rxq_idx;
		spin_unlock(&ctx->dev->lock_chcr_dev);
	}
out:
	return err;
}

static int chcr_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 * create_hash_wr - Create hash work request
 * @req - ahash request
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int kctx_len = 0;
	u8 hash_size_in_response = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;

	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
	kctx_len = param->alg_prm.result_size + iopad_alignment;
	if (param->opad_needed)
		kctx_len += param->alg_prm.result_size + iopad_alignment;

	if (req_ctx->result)
		hash_size_in_response = digestsize;
	else
		hash_size_in_response = param->alg_prm.result_size;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		return skb;

	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);

	skb_set_transport_header(skb, transhdr_len);
	if (param->bfr_len != 0)
		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
				    param->bfr_len);
	if (param->sg_len != 0)
		write_sg_to_skb(skb, &frags, req->src, param->sg_len);

	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response,
		    0);
	req_ctx->skb = skb;
	skb_get(skb);
	return skb;
}

static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

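	/*
	 * The hardware hashes only whole blocks mid-stream, so send the
	 * largest block-aligned span now and buffer any sub-block tail in
	 * reqbfr for the next update.
	 */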
	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}

	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.sg_len = nbytes - req_ctx->reqlen;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 0;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (remainder) {
		u8 *temp;
		/* Swap buffers */
		temp = req_ctx->reqbfr;
		req_ctx->reqbfr = req_ctx->skbfr;
		req_ctx->skbfr = temp;
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}

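/*
 * Build a padding-only final block: the 0x80 terminator followed by the
 * message length in bits in the last eight bytes (offset 56 for 64-byte
 * blocks, 120 for 128-byte blocks), as SHA padding requires.
 */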
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}

static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(ctx);

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.sg_len = req->nbytes;
	params.bfr_len = req_ctx->reqlen;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->result = 1;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}

static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.last = 0;
	params.more = 0;
	params.sg_len = req->nbytes;
	params.bfr_len = 0;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}

static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* use the key to calculate the ipad and opad. ipad will be sent
	 * with the first request's data. opad will be sent with the final
	 * hash result. ipad lives in hmacctx->ipad and opad in
	 * hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

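	/* IPAD_DATA and OPAD_DATA replicate the RFC 2104 pad bytes
	 * (0x36 and 0x5c) across a word, so this XORs whole words of the
	 * pads at a time.
	 */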
	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}

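/*
 * XTS keys are two equal-size AES keys concatenated (data key plus
 * tweak key), hence the doubled lengths accepted below.
 */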
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	unsigned short context_size = 0;

	if ((key_len != (AES_KEYSIZE_128 << 1)) &&
	    (key_len != (AES_KEYSIZE_256 << 1))) {
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		ablkctx->enckey_len = 0;
		return -EINVAL;

	}

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	ablkctx->key_ctx_hdr =
		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				 CHCR_KEYCTX_NO_KEY, 1,
				 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
}

static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	req_ctx->skb = NULL;
	req_ctx->result = 0;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);
	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}

static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc(aes-chcr)",
			.cra_priority = CHCR_CRA_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
				CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct chcr_context)
				+ sizeof(struct ablk_ctx),
			.cra_alignmask = 0,
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_init = chcr_cra_init,
			.cra_exit = NULL,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_cbc_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "xts(aes-chcr)",
			.cra_priority = CHCR_CRA_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
				CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx),
			.cra_alignmask = 0,
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_init = chcr_cra_init,
			.cra_exit = NULL,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = 2 * AES_MIN_KEY_SIZE,
					.max_keysize = 2 * AES_MAX_KEY_SIZE,
					.ivsize = AES_BLOCK_SIZE,
					.setkey = chcr_aes_xts_setkey,
					.encrypt = chcr_aes_encrypt,
					.decrypt = chcr_aes_decrypt,
				}
			}
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac(sha1-chcr)",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac(sha224-chcr)",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac(sha256-chcr)",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac(sha384-chcr)",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac(sha512-chcr)",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
};

/*
 * chcr_unregister_alg - Deregister crypto algorithms with
 * kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)

/*
 * chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;
			a_hash->halg.base.cra_type = &crypto_ahash_type;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once when the first device comes up. After this
 * the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once when the last device goes down. After this
 * the kernel will not call the driver API for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}