324429d7 HS |
1 | /* |
2 | * This file is part of the Chelsio T6 Crypto driver for Linux. | |
3 | * | |
4 | * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. | |
5 | * | |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | * | |
34 | * Written and Maintained by: | |
35 | * Manoj Malviya (manojmalviya@chelsio.com) | |
36 | * Atul Gupta (atul.gupta@chelsio.com) | |
37 | * Jitendra Lulla (jlulla@chelsio.com) | |
38 | * Yeshaswi M R Gowda (yeshaswi@chelsio.com) | |
39 | * Harsh Jain (harsh@chelsio.com) | |
40 | */ | |
41 | ||
42 | #define pr_fmt(fmt) "chcr:" fmt | |
43 | ||
44 | #include <linux/kernel.h> | |
45 | #include <linux/module.h> | |
46 | #include <linux/crypto.h> | |
47 | #include <linux/cryptohash.h> | |
48 | #include <linux/skbuff.h> | |
49 | #include <linux/rtnetlink.h> | |
50 | #include <linux/highmem.h> | |
51 | #include <linux/scatterlist.h> | |
52 | ||
53 | #include <crypto/aes.h> | |
54 | #include <crypto/algapi.h> | |
55 | #include <crypto/hash.h> | |
8f6acb7f | 56 | #include <crypto/gcm.h> |
324429d7 | 57 | #include <crypto/sha.h> |
2debd332 | 58 | #include <crypto/authenc.h> |
b8fd1f41 HJ |
59 | #include <crypto/ctr.h> |
60 | #include <crypto/gf128mul.h> | |
2debd332 HJ |
61 | #include <crypto/internal/aead.h> |
62 | #include <crypto/null.h> | |
63 | #include <crypto/internal/skcipher.h> | |
64 | #include <crypto/aead.h> | |
65 | #include <crypto/scatterwalk.h> | |
324429d7 HS |
66 | #include <crypto/internal/hash.h> |
67 | ||
68 | #include "t4fw_api.h" | |
69 | #include "t4_msg.h" | |
70 | #include "chcr_core.h" | |
71 | #include "chcr_algo.h" | |
72 | #include "chcr_crypto.h" | |
73 | ||
2f47d580 HJ |
74 | #define IV AES_BLOCK_SIZE |
75 | ||
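/* Precomputed space, in bytes, consumed in a work request by a source
 * ULPTX SGL (sgl_ent_len) or a destination physical DSGL (dsgl_ent_len)
 * with N entries; chcr_sg_ent_in_wr() uses these tables to bound how much
 * payload fits in a single WR.
 */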
8579e076 CIK |
76 | static unsigned int sgl_ent_len[] = { |
77 | 0, 0, 16, 24, 40, 48, 64, 72, 88, | |
78 | 96, 112, 120, 136, 144, 160, 168, 184, | |
79 | 192, 208, 216, 232, 240, 256, 264, 280, | |
80 | 288, 304, 312, 328, 336, 352, 360, 376 | |
81 | }; | |
6dad4e8a | 82 | |
8579e076 CIK |
83 | static unsigned int dsgl_ent_len[] = { |
84 | 0, 32, 32, 48, 48, 64, 64, 80, 80, | |
85 | 112, 112, 128, 128, 144, 144, 160, 160, | |
86 | 192, 192, 208, 208, 224, 224, 240, 240, | |
87 | 272, 272, 288, 288, 304, 304, 320, 320 | |
88 | }; | |
6dad4e8a AG |
89 | |
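/* AES key-schedule round constants (Rcon), kept in the high byte of each
 * word; used by get_aes_decrypt_key() when expanding the key.
 */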
90 | static u32 round_constant[11] = { | |
91 | 0x01000000, 0x02000000, 0x04000000, 0x08000000, | |
92 | 0x10000000, 0x20000000, 0x40000000, 0x80000000, | |
93 | 0x1B000000, 0x36000000, 0x6C000000 | |
94 | }; | |
95 | ||
96 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | |
97 | unsigned char *input, int err); | |
98 | ||
2debd332 HJ |
99 | static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) |
100 | { | |
101 | return ctx->crypto_ctx->aeadctx; | |
102 | } | |
103 | ||
324429d7 HS |
104 | static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) |
105 | { | |
106 | return ctx->crypto_ctx->ablkctx; | |
107 | } | |
108 | ||
109 | static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx) | |
110 | { | |
111 | return ctx->crypto_ctx->hmacctx; | |
112 | } | |
113 | ||
2debd332 HJ |
114 | static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx) |
115 | { | |
116 | return gctx->ctx->gcm; | |
117 | } | |
118 | ||
119 | static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx) | |
120 | { | |
121 | return gctx->ctx->authenc; | |
122 | } | |
123 | ||
324429d7 HS |
124 | static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) |
125 | { | |
126 | return ctx->dev->u_ctx; | |
127 | } | |
128 | ||
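/* True when the request skb fits within the maximum work request length
 * and can therefore be sent as immediate data.
 */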
129 | static inline int is_ofld_imm(const struct sk_buff *skb) | |
130 | { | |
2f47d580 | 131 | return (skb->len <= SGE_MAX_WR_LEN); |
324429d7 HS |
132 | } |
133 | ||
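/* Count how many SG entries of at most @entlen bytes are needed to cover
 * @reqlen bytes of @sg after skipping the first @skip bytes.
 */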
2f47d580 HJ |
134 | static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, |
135 | unsigned int entlen, | |
136 | unsigned int skip) | |
2956f36c HJ |
137 | { |
138 | int nents = 0; | |
139 | unsigned int less; | |
2f47d580 | 140 | unsigned int skip_len = 0; |
2956f36c | 141 | |
2f47d580 HJ |
142 | while (sg && skip) { |
143 | if (sg_dma_len(sg) <= skip) { | |
144 | skip -= sg_dma_len(sg); | |
145 | skip_len = 0; | |
146 | sg = sg_next(sg); | |
147 | } else { | |
148 | skip_len = skip; | |
149 | skip = 0; | |
150 | } | |
2956f36c HJ |
151 | } |
152 | ||
2f47d580 HJ |
153 | while (sg && reqlen) { |
154 | less = min(reqlen, sg_dma_len(sg) - skip_len); | |
155 | nents += DIV_ROUND_UP(less, entlen); | |
156 | reqlen -= less; | |
157 | skip_len = 0; | |
158 | sg = sg_next(sg); | |
159 | } | |
2956f36c HJ |
160 | return nents; |
161 | } | |
162 | ||
2f47d580 HJ |
163 | static inline void chcr_handle_ahash_resp(struct ahash_request *req, |
164 | unsigned char *input, | |
165 | int err) | |
166 | { | |
167 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); | |
168 | int digestsize, updated_digestsize; | |
169 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
170 | struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); | |
171 | ||
172 | if (input == NULL) | |
173 | goto out; | |
2f47d580 HJ |
174 | digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); |
175 | if (reqctx->is_sg_map) | |
176 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | |
177 | if (reqctx->dma_addr) | |
178 | dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr, | |
179 | reqctx->dma_len, DMA_TO_DEVICE); | |
180 | reqctx->dma_addr = 0; | |
181 | updated_digestsize = digestsize; | |
182 | if (digestsize == SHA224_DIGEST_SIZE) | |
183 | updated_digestsize = SHA256_DIGEST_SIZE; | |
184 | else if (digestsize == SHA384_DIGEST_SIZE) | |
185 | updated_digestsize = SHA512_DIGEST_SIZE; | |
186 | if (reqctx->result == 1) { | |
187 | reqctx->result = 0; | |
188 | memcpy(req->result, input + sizeof(struct cpl_fw6_pld), | |
189 | digestsize); | |
190 | } else { | |
191 | memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld), | |
192 | updated_digestsize); | |
193 | } | |
194 | out: | |
195 | req->base.complete(&req->base, err); | |
6dad4e8a | 196 | } |
2f47d580 | 197 | |
6dad4e8a | 198 | static inline int get_aead_subtype(struct crypto_aead *aead) |
2f47d580 | 199 | { |
6dad4e8a AG |
200 | struct aead_alg *alg = crypto_aead_alg(aead); |
201 | struct chcr_alg_template *chcr_crypto_alg = | |
202 | container_of(alg, struct chcr_alg_template, alg.aead); | |
203 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | |
2f47d580 | 204 | } |
2f47d580 | 205 | |
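/* Software tag verification: compare the authentication tag computed by the
 * hardware (returned in the CPL_FW6_PLD) with the expected tag for this
 * request and set *err to -EBADMSG on mismatch.
 */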
6dad4e8a | 206 | void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) |
2debd332 HJ |
207 | { |
208 | u8 temp[SHA512_DIGEST_SIZE]; | |
209 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
210 | int authsize = crypto_aead_authsize(tfm); | |
211 | struct cpl_fw6_pld *fw6_pld; | |
212 | int cmp = 0; | |
213 | ||
214 | fw6_pld = (struct cpl_fw6_pld *)input; | |
215 | if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) || | |
216 | (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) { | |
d600fc8a | 217 | cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize); |
2debd332 HJ |
218 | } else { |
219 | ||
220 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp, | |
221 | authsize, req->assoclen + | |
222 | req->cryptlen - authsize); | |
d600fc8a | 223 | cmp = crypto_memneq(temp, (fw6_pld + 1), authsize); |
2debd332 HJ |
224 | } |
225 | if (cmp) | |
226 | *err = -EBADMSG; | |
227 | else | |
228 | *err = 0; | |
229 | } | |
230 | ||
6dad4e8a AG |
231 | static inline void chcr_handle_aead_resp(struct aead_request *req, |
232 | unsigned char *input, | |
233 | int err) | |
234 | { | |
235 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
236 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
237 | struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); | |
238 | ||
239 | chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); | |
240 | if (reqctx->b0_dma) | |
241 | dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma, | |
242 | reqctx->b0_len, DMA_BIDIRECTIONAL); | |
243 | if (reqctx->verify == VERIFY_SW) { | |
244 | chcr_verify_tag(req, input, &err); | |
245 | reqctx->verify = VERIFY_HW; | |
246 | } | |
247 | req->base.complete(&req->base, err); | |
248 | } | |
249 | ||
324429d7 HS |
250 | /* |
251 | * chcr_handle_resp - Unmap the DMA buffers associated with the request | |
252 | * @req: crypto request | |
253 | */ | |
254 | int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |
2debd332 | 255 | int err) |
324429d7 HS |
256 | { |
257 | struct crypto_tfm *tfm = req->tfm; | |
258 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
ee0863ba | 259 | struct adapter *adap = padap(ctx->dev); |
324429d7 HS |
260 | |
261 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { | |
2debd332 | 262 | case CRYPTO_ALG_TYPE_AEAD: |
2f47d580 | 263 | chcr_handle_aead_resp(aead_request_cast(req), input, err); |
2debd332 HJ |
264 | break; |
265 | ||
44e9f799 | 266 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
b8fd1f41 HJ |
267 | err = chcr_handle_cipher_resp(ablkcipher_request_cast(req), |
268 | input, err); | |
324429d7 HS |
269 | break; |
270 | ||
271 | case CRYPTO_ALG_TYPE_AHASH: | |
2f47d580 | 272 | chcr_handle_ahash_resp(ahash_request_cast(req), input, err); |
324429d7 | 273 | } |
ee0863ba | 274 | atomic_inc(&adap->chcr_stats.complete); |
2debd332 | 275 | return err; |
324429d7 HS |
276 | } |
277 | ||
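/* Expand the AES key schedule and copy out its final Nk words in reverse
 * order, producing the decryption ("reverse round") key the hardware
 * expects.
 */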
2f47d580 | 278 | static void get_aes_decrypt_key(unsigned char *dec_key, |
39f91a34 HJ |
279 | const unsigned char *key, |
280 | unsigned int keylength) | |
281 | { | |
282 | u32 temp; | |
283 | u32 w_ring[MAX_NK]; | |
284 | int i, j, k; | |
285 | u8 nr, nk; | |
286 | ||
287 | switch (keylength) { | |
288 | case AES_KEYLENGTH_128BIT: | |
289 | nk = KEYLENGTH_4BYTES; | |
290 | nr = NUMBER_OF_ROUNDS_10; | |
291 | break; | |
292 | case AES_KEYLENGTH_192BIT: | |
293 | nk = KEYLENGTH_6BYTES; | |
294 | nr = NUMBER_OF_ROUNDS_12; | |
295 | break; | |
296 | case AES_KEYLENGTH_256BIT: | |
297 | nk = KEYLENGTH_8BYTES; | |
298 | nr = NUMBER_OF_ROUNDS_14; | |
299 | break; | |
300 | default: | |
301 | return; | |
302 | } | |
303 | for (i = 0; i < nk; i++) | |
304 | w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]); | |
305 | ||
306 | i = 0; | |
307 | temp = w_ring[nk - 1]; | |
308 | while (i + nk < (nr + 1) * 4) { | |
309 | if (!(i % nk)) { | |
310 | /* RotWord(temp) */ | |
311 | temp = (temp << 8) | (temp >> 24); | |
312 | temp = aes_ks_subword(temp); | |
313 | temp ^= round_constant[i / nk]; | |
314 | } else if (nk == 8 && (i % 4 == 0)) { | |
315 | temp = aes_ks_subword(temp); | |
316 | } | |
317 | w_ring[i % nk] ^= temp; | |
318 | temp = w_ring[i % nk]; | |
319 | i++; | |
320 | } | |
321 | i--; | |
322 | for (k = 0, j = i % nk; k < nk; k++) { | |
323 | *((u32 *)dec_key + k) = htonl(w_ring[j]); | |
324 | j--; | |
325 | if (j < 0) | |
326 | j += nk; | |
327 | } | |
328 | } | |
329 | ||
e7922729 | 330 | static struct crypto_shash *chcr_alloc_shash(unsigned int ds) |
324429d7 | 331 | { |
ec1bca94 | 332 | struct crypto_shash *base_hash = ERR_PTR(-EINVAL); |
324429d7 HS |
333 | |
334 | switch (ds) { | |
335 | case SHA1_DIGEST_SIZE: | |
e7922729 | 336 | base_hash = crypto_alloc_shash("sha1", 0, 0); |
324429d7 HS |
337 | break; |
338 | case SHA224_DIGEST_SIZE: | |
e7922729 | 339 | base_hash = crypto_alloc_shash("sha224", 0, 0); |
324429d7 HS |
340 | break; |
341 | case SHA256_DIGEST_SIZE: | |
e7922729 | 342 | base_hash = crypto_alloc_shash("sha256", 0, 0); |
324429d7 HS |
343 | break; |
344 | case SHA384_DIGEST_SIZE: | |
e7922729 | 345 | base_hash = crypto_alloc_shash("sha384", 0, 0); |
324429d7 HS |
346 | break; |
347 | case SHA512_DIGEST_SIZE: | |
e7922729 | 348 | base_hash = crypto_alloc_shash("sha512", 0, 0); |
324429d7 HS |
349 | break; |
350 | } | |
324429d7 | 351 | |
e7922729 | 352 | return base_hash; |
324429d7 HS |
353 | } |
354 | ||
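/* Hash one block of ipad/opad with the software shash and export the
 * intermediate state; used to precompute the HMAC inner and outer partial
 * digests.
 */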
355 | static int chcr_compute_partial_hash(struct shash_desc *desc, | |
356 | char *iopad, char *result_hash, | |
357 | int digest_size) | |
358 | { | |
359 | struct sha1_state sha1_st; | |
360 | struct sha256_state sha256_st; | |
361 | struct sha512_state sha512_st; | |
362 | int error; | |
363 | ||
364 | if (digest_size == SHA1_DIGEST_SIZE) { | |
365 | error = crypto_shash_init(desc) ?: | |
366 | crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?: | |
367 | crypto_shash_export(desc, (void *)&sha1_st); | |
368 | memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE); | |
369 | } else if (digest_size == SHA224_DIGEST_SIZE) { | |
370 | error = crypto_shash_init(desc) ?: | |
371 | crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: | |
372 | crypto_shash_export(desc, (void *)&sha256_st); | |
373 | memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); | |
374 | ||
375 | } else if (digest_size == SHA256_DIGEST_SIZE) { | |
376 | error = crypto_shash_init(desc) ?: | |
377 | crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: | |
378 | crypto_shash_export(desc, (void *)&sha256_st); | |
379 | memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); | |
380 | ||
381 | } else if (digest_size == SHA384_DIGEST_SIZE) { | |
382 | error = crypto_shash_init(desc) ?: | |
383 | crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: | |
384 | crypto_shash_export(desc, (void *)&sha512_st); | |
385 | memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); | |
386 | ||
387 | } else if (digest_size == SHA512_DIGEST_SIZE) { | |
388 | error = crypto_shash_init(desc) ?: | |
389 | crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: | |
390 | crypto_shash_export(desc, (void *)&sha512_st); | |
391 | memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); | |
392 | } else { | |
393 | error = -EINVAL; | |
394 | pr_err("Unknown digest size %d\n", digest_size); | |
395 | } | |
396 | return error; | |
397 | } | |
398 | ||
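/* Convert the exported hash state words to the big-endian byte order
 * expected in the hardware key context.
 */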
399 | static void chcr_change_order(char *buf, int ds) | |
400 | { | |
401 | int i; | |
402 | ||
403 | if (ds == SHA512_DIGEST_SIZE) { | |
404 | for (i = 0; i < (ds / sizeof(u64)); i++) | |
405 | *((__be64 *)buf + i) = | |
406 | cpu_to_be64(*((u64 *)buf + i)); | |
407 | } else { | |
408 | for (i = 0; i < (ds / sizeof(u32)); i++) | |
409 | *((__be32 *)buf + i) = | |
410 | cpu_to_be32(*((u32 *)buf + i)); | |
411 | } | |
412 | } | |
413 | ||
414 | static inline int is_hmac(struct crypto_tfm *tfm) | |
415 | { | |
416 | struct crypto_alg *alg = tfm->__crt_alg; | |
417 | struct chcr_alg_template *chcr_crypto_alg = | |
418 | container_of(__crypto_ahash_alg(alg), struct chcr_alg_template, | |
419 | alg.hash); | |
5c86a8ff | 420 | if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC) |
324429d7 HS |
421 | return 1; |
422 | return 0; | |
423 | } | |
424 | ||
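/* dsgl_walk_*: helpers that build the destination physical DSGL
 * (CPL_RX_PHYS_DSGL) one entry at a time; the ulptx_walk_* helpers below do
 * the same for the source ULPTX SGL.
 */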
2f47d580 HJ |
425 | static inline void dsgl_walk_init(struct dsgl_walk *walk, |
426 | struct cpl_rx_phys_dsgl *dsgl) | |
324429d7 | 427 | { |
2f47d580 HJ |
428 | walk->dsgl = dsgl; |
429 | walk->nents = 0; | |
430 | walk->to = (struct phys_sge_pairs *)(dsgl + 1); | |
431 | } | |
432 | ||
433 | static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid) | |
434 | { | |
435 | struct cpl_rx_phys_dsgl *phys_cpl; | |
436 | ||
437 | phys_cpl = walk->dsgl; | |
324429d7 HS |
438 | |
439 | phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) | |
440 | | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); | |
2f47d580 HJ |
441 | phys_cpl->pcirlxorder_to_noofsgentr = |
442 | htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) | | |
443 | CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) | | |
444 | CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | | |
445 | CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | | |
446 | CPL_RX_PHYS_DSGL_DCAID_V(0) | | |
447 | CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents)); | |
448 | phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; | |
449 | phys_cpl->rss_hdr_int.qid = htons(qid); | |
450 | phys_cpl->rss_hdr_int.hash_val = 0; | |
451 | } | |
452 | ||
453 | static inline void dsgl_walk_add_page(struct dsgl_walk *walk, | |
454 | size_t size, | |
455 | dma_addr_t *addr) | |
456 | { | |
457 | int j; | |
458 | ||
459 | if (!size) | |
460 | return; | |
461 | j = walk->nents; | |
462 | walk->to->len[j % 8] = htons(size); | |
463 | walk->to->addr[j % 8] = cpu_to_be64(*addr); | |
464 | j++; | |
465 | if ((j % 8) == 0) | |
466 | walk->to++; | |
467 | walk->nents = j; | |
468 | } | |
469 | ||
470 | static void dsgl_walk_add_sg(struct dsgl_walk *walk, | |
471 | struct scatterlist *sg, | |
472 | unsigned int slen, | |
473 | unsigned int skip) | |
474 | { | |
475 | int skip_len = 0; | |
476 | unsigned int left_size = slen, len = 0; | |
477 | unsigned int j = walk->nents; | |
478 | int offset, ent_len; | |
479 | ||
480 | if (!slen) | |
481 | return; | |
482 | while (sg && skip) { | |
483 | if (sg_dma_len(sg) <= skip) { | |
484 | skip -= sg_dma_len(sg); | |
485 | skip_len = 0; | |
486 | sg = sg_next(sg); | |
487 | } else { | |
488 | skip_len = skip; | |
489 | skip = 0; | |
490 | } | |
491 | } | |
492 | ||
2956f36c | 493 | while (left_size && sg) { |
2f47d580 | 494 | len = min_t(u32, left_size, sg_dma_len(sg) - skip_len); |
2956f36c HJ |
495 | offset = 0; |
496 | while (len) { | |
2f47d580 HJ |
497 | ent_len = min_t(u32, len, CHCR_DST_SG_SIZE); |
498 | walk->to->len[j % 8] = htons(ent_len); | |
499 | walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) + | |
500 | offset + skip_len); | |
2956f36c HJ |
501 | offset += ent_len; |
502 | len -= ent_len; | |
503 | j++; | |
504 | if ((j % 8) == 0) | |
2f47d580 | 505 | walk->to++; |
2956f36c | 506 | } |
2f47d580 HJ |
507 | walk->last_sg = sg; |
508 | walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) - | |
509 | skip_len) + skip_len; | |
510 | left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len); | |
511 | skip_len = 0; | |
2956f36c HJ |
512 | sg = sg_next(sg); |
513 | } | |
2f47d580 HJ |
514 | walk->nents = j; |
515 | } | |
516 | ||
517 | static inline void ulptx_walk_init(struct ulptx_walk *walk, | |
518 | struct ulptx_sgl *ulp) | |
519 | { | |
520 | walk->sgl = ulp; | |
521 | walk->nents = 0; | |
522 | walk->pair_idx = 0; | |
523 | walk->pair = ulp->sge; | |
524 | walk->last_sg = NULL; | |
525 | walk->last_sg_len = 0; | |
526 | } | |
527 | ||
528 | static inline void ulptx_walk_end(struct ulptx_walk *walk) | |
529 | { | |
530 | walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | | |
531 | ULPTX_NSGE_V(walk->nents)); | |
532 | } | |
2956f36c | 533 | |
2f47d580 HJ |
534 | |
535 | static inline void ulptx_walk_add_page(struct ulptx_walk *walk, | |
536 | size_t size, | |
537 | dma_addr_t *addr) | |
538 | { | |
539 | if (!size) | |
540 | return; | |
541 | ||
542 | if (walk->nents == 0) { | |
543 | walk->sgl->len0 = cpu_to_be32(size); | |
544 | walk->sgl->addr0 = cpu_to_be64(*addr); | |
545 | } else { | |
546 | walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr); | |
547 | walk->pair->len[walk->pair_idx] = cpu_to_be32(size); | |
548 | walk->pair_idx = !walk->pair_idx; | |
549 | if (!walk->pair_idx) | |
550 | walk->pair++; | |
551 | } | |
552 | walk->nents++; | |
324429d7 HS |
553 | } |
554 | ||
2f47d580 | 555 | static void ulptx_walk_add_sg(struct ulptx_walk *walk, |
adf1ca61 | 556 | struct scatterlist *sg, |
2f47d580 HJ |
557 | unsigned int len, |
558 | unsigned int skip) | |
324429d7 | 559 | { |
2f47d580 HJ |
560 | int small; |
561 | int skip_len = 0; | |
562 | unsigned int sgmin; | |
324429d7 | 563 | |
2f47d580 HJ |
564 | if (!len) |
565 | return; | |
566 | ||
567 | while (sg && skip) { | |
568 | if (sg_dma_len(sg) <= skip) { | |
569 | skip -= sg_dma_len(sg); | |
570 | skip_len = 0; | |
571 | sg = sg_next(sg); | |
572 | } else { | |
573 | skip_len = skip; | |
574 | skip = 0; | |
575 | } | |
576 | } | |
8daa32b9 HJ |
577 | WARN(!sg, "SG should not be null here\n"); |
578 | if (sg && (walk->nents == 0)) { | |
2f47d580 HJ |
579 | small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len); |
580 | sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); | |
581 | walk->sgl->len0 = cpu_to_be32(sgmin); | |
582 | walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len); | |
583 | walk->nents++; | |
584 | len -= sgmin; | |
585 | walk->last_sg = sg; | |
586 | walk->last_sg_len = sgmin + skip_len; | |
587 | skip_len += sgmin; | |
588 | if (sg_dma_len(sg) == skip_len) { | |
589 | sg = sg_next(sg); | |
590 | skip_len = 0; | |
591 | } | |
592 | } | |
593 | ||
594 | while (sg && len) { | |
595 | small = min(sg_dma_len(sg) - skip_len, len); | |
596 | sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); | |
597 | walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin); | |
598 | walk->pair->addr[walk->pair_idx] = | |
599 | cpu_to_be64(sg_dma_address(sg) + skip_len); | |
600 | walk->pair_idx = !walk->pair_idx; | |
601 | walk->nents++; | |
602 | if (!walk->pair_idx) | |
603 | walk->pair++; | |
604 | len -= sgmin; | |
605 | skip_len += sgmin; | |
606 | walk->last_sg = sg; | |
607 | walk->last_sg_len = skip_len; | |
608 | if (sg_dma_len(sg) == skip_len) { | |
609 | sg = sg_next(sg); | |
610 | skip_len = 0; | |
611 | } | |
324429d7 | 612 | } |
324429d7 HS |
613 | } |
614 | ||
615 | static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) | |
616 | { | |
617 | struct crypto_alg *alg = tfm->__crt_alg; | |
618 | struct chcr_alg_template *chcr_crypto_alg = | |
619 | container_of(alg, struct chcr_alg_template, alg.crypto); | |
620 | ||
621 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | |
622 | } | |
623 | ||
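/* Return -1 if the crypto TX queue selected by @idx is currently full,
 * 0 otherwise.
 */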
b8fd1f41 HJ |
624 | static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) |
625 | { | |
626 | struct adapter *adap = netdev2adap(dev); | |
627 | struct sge_uld_txq_info *txq_info = | |
628 | adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; | |
629 | struct sge_uld_txq *txq; | |
630 | int ret = 0; | |
631 | ||
632 | local_bh_disable(); | |
633 | txq = &txq_info->uldtxq[idx]; | |
634 | spin_lock(&txq->sendq.lock); | |
635 | if (txq->full) | |
636 | ret = -1; | |
637 | spin_unlock(&txq->sendq.lock); | |
638 | local_bh_enable(); | |
639 | return ret; | |
640 | } | |
641 | ||
324429d7 HS |
642 | static int generate_copy_rrkey(struct ablk_ctx *ablkctx, |
643 | struct _key_ctx *key_ctx) | |
644 | { | |
645 | if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { | |
cc1b156d | 646 | memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len); |
324429d7 HS |
647 | } else { |
648 | memcpy(key_ctx->key, | |
649 | ablkctx->key + (ablkctx->enckey_len >> 1), | |
650 | ablkctx->enckey_len >> 1); | |
cc1b156d HJ |
651 | memcpy(key_ctx->key + (ablkctx->enckey_len >> 1), |
652 | ablkctx->rrkey, ablkctx->enckey_len >> 1); | |
324429d7 HS |
653 | } |
654 | return 0; | |
655 | } | |
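/* Walk @src and @dst together and return how many payload bytes can be
 * covered by scatter-gather entries while @space bytes remain in the work
 * request; sgl_ent_len[]/dsgl_ent_len[] give the per-entry cost.
 */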
b8fd1f41 HJ |
656 | static int chcr_sg_ent_in_wr(struct scatterlist *src, |
657 | struct scatterlist *dst, | |
658 | unsigned int minsg, | |
2f47d580 HJ |
659 | unsigned int space, |
660 | unsigned int srcskip, | |
661 | unsigned int dstskip) | |
b8fd1f41 HJ |
662 | { |
663 | int srclen = 0, dstlen = 0; | |
2f47d580 | 664 | int srcsg = minsg, dstsg = minsg; |
2956f36c | 665 | int offset = 0, less; |
b8fd1f41 | 666 | |
2f47d580 HJ |
667 | if (sg_dma_len(src) == srcskip) { |
668 | src = sg_next(src); | |
669 | srcskip = 0; | |
670 | } | |
671 | ||
672 | if (sg_dma_len(dst) == dstskip) { | |
673 | dst = sg_next(dst); | |
674 | dstskip = 0; | |
675 | } | |
676 | ||
677 | while (src && dst && | |
b8fd1f41 | 678 | space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { |
2f47d580 | 679 | srclen += (sg_dma_len(src) - srcskip); |
b8fd1f41 | 680 | srcsg++; |
2956f36c | 681 | offset = 0; |
b8fd1f41 HJ |
682 | while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && |
683 | space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { | |
684 | if (srclen <= dstlen) | |
685 | break; | |
2f47d580 | 686 | less = min_t(unsigned int, sg_dma_len(dst) - offset - |
db6deea4 | 687 | dstskip, CHCR_DST_SG_SIZE); |
2956f36c HJ |
688 | dstlen += less; |
689 | offset += less; | |
2f47d580 | 690 | if (offset == sg_dma_len(dst)) { |
2956f36c HJ |
691 | dst = sg_next(dst); |
692 | offset = 0; | |
693 | } | |
b8fd1f41 | 694 | dstsg++; |
2f47d580 | 695 | dstskip = 0; |
b8fd1f41 HJ |
696 | } |
697 | src = sg_next(src); | |
db6deea4 | 698 | srcskip = 0; |
b8fd1f41 | 699 | } |
b8fd1f41 HJ |
700 | return min(srclen, dstlen); |
701 | } | |
702 | ||
703 | static int chcr_cipher_fallback(struct crypto_skcipher *cipher, | |
704 | u32 flags, | |
705 | struct scatterlist *src, | |
706 | struct scatterlist *dst, | |
707 | unsigned int nbytes, | |
708 | u8 *iv, | |
709 | unsigned short op_type) | |
710 | { | |
711 | int err; | |
712 | ||
713 | SKCIPHER_REQUEST_ON_STACK(subreq, cipher); | |
714 | skcipher_request_set_tfm(subreq, cipher); | |
715 | skcipher_request_set_callback(subreq, flags, NULL, NULL); | |
716 | skcipher_request_set_crypt(subreq, src, dst, | |
717 | nbytes, iv); | |
718 | ||
719 | err = op_type ? crypto_skcipher_decrypt(subreq) : | |
720 | crypto_skcipher_encrypt(subreq); | |
721 | skcipher_request_zero(subreq); | |
722 | ||
723 | return err; | |
324429d7 | 724 | |
b8fd1f41 | 725 | } |
324429d7 | 726 | static inline void create_wreq(struct chcr_context *ctx, |
358961d1 | 727 | struct chcr_wr *chcr_req, |
2f47d580 HJ |
728 | struct crypto_async_request *req, |
729 | unsigned int imm, | |
570265bf | 730 | int hash_sz, |
2f47d580 | 731 | unsigned int len16, |
2512a624 HJ |
732 | unsigned int sc_len, |
733 | unsigned int lcb) | |
324429d7 HS |
734 | { |
735 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | |
72a56ca9 | 736 | int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx]; |
324429d7 | 737 | |
324429d7 | 738 | |
570265bf | 739 | chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE; |
358961d1 | 740 | chcr_req->wreq.pld_size_hash_size = |
570265bf | 741 | htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); |
358961d1 | 742 | chcr_req->wreq.len16_pkd = |
2f47d580 | 743 | htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16))); |
358961d1 HJ |
744 | chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); |
745 | chcr_req->wreq.rx_chid_to_rx_q_id = | |
8a13449f | 746 | FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, |
570265bf | 747 | !!lcb, ctx->tx_qidx); |
324429d7 | 748 | |
8a13449f HJ |
749 | chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id, |
750 | qid); | |
2f47d580 HJ |
751 | chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) - |
752 | ((sizeof(chcr_req->wreq)) >> 4))); | |
324429d7 | 753 | |
2f47d580 | 754 | chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm); |
358961d1 | 755 | chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + |
2f47d580 | 756 | sizeof(chcr_req->key_ctx) + sc_len); |
324429d7 HS |
757 | } |
758 | ||
759 | /** | |
760 | * create_cipher_wr - form the WR for cipher operations | |
761 | * @wrparam: work request parameters: the cipher request, the ingress qid |
762 | *	      where the response of this WR should be received, and the number |
763 | *	      of bytes to process. |
765 | */ | |
b8fd1f41 | 766 | static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) |
324429d7 | 767 | { |
b8fd1f41 | 768 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req); |
2f47d580 | 769 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
324429d7 | 770 | struct sk_buff *skb = NULL; |
358961d1 | 771 | struct chcr_wr *chcr_req; |
324429d7 | 772 | struct cpl_rx_phys_dsgl *phys_cpl; |
2f47d580 | 773 | struct ulptx_sgl *ulptx; |
b8fd1f41 HJ |
774 | struct chcr_blkcipher_req_ctx *reqctx = |
775 | ablkcipher_request_ctx(wrparam->req); | |
2f47d580 | 776 | unsigned int temp = 0, transhdr_len, dst_size; |
b8fd1f41 | 777 | int error; |
2956f36c | 778 | int nents; |
2f47d580 | 779 | unsigned int kctx_len; |
b8fd1f41 HJ |
780 | gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? |
781 | GFP_KERNEL : GFP_ATOMIC; | |
2f47d580 | 782 | struct adapter *adap = padap(c_ctx(tfm)->dev); |
324429d7 | 783 | |
2f47d580 HJ |
784 | nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, |
785 | reqctx->dst_ofst); | |
786 | dst_size = get_space_for_phys_dsgl(nents + 1); | |
125d01ca | 787 | kctx_len = roundup(ablkctx->enckey_len, 16); |
2f47d580 HJ |
788 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
789 | nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes, | |
790 | CHCR_SRC_SG_SIZE, reqctx->src_ofst); | |
125d01ca HJ |
791 | temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) : |
792 | (sgl_len(nents + MIN_CIPHER_SG) * 8); | |
2f47d580 | 793 | transhdr_len += temp; |
125d01ca | 794 | transhdr_len = roundup(transhdr_len, 16); |
2f47d580 | 795 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
b8fd1f41 HJ |
796 | if (!skb) { |
797 | error = -ENOMEM; | |
798 | goto err; | |
799 | } | |
de77b966 | 800 | chcr_req = __skb_put_zero(skb, transhdr_len); |
358961d1 | 801 | chcr_req->sec_cpl.op_ivinsrtofst = |
2f47d580 | 802 | FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1); |
358961d1 | 803 | |
2f47d580 | 804 | chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes); |
358961d1 | 805 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
2f47d580 | 806 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0); |
358961d1 HJ |
807 | |
808 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | |
809 | FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); | |
b8fd1f41 | 810 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0, |
324429d7 | 811 | ablkctx->ciph_mode, |
2f47d580 | 812 | 0, 0, IV >> 1); |
358961d1 | 813 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, |
2f47d580 | 814 | 0, 0, dst_size); |
324429d7 | 815 | |
358961d1 | 816 | chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; |
b8fd1f41 HJ |
817 | if ((reqctx->op == CHCR_DECRYPT_OP) && |
818 | (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | |
819 | CRYPTO_ALG_SUB_TYPE_CTR)) && | |
820 | (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | |
821 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) { | |
358961d1 | 822 | generate_copy_rrkey(ablkctx, &chcr_req->key_ctx); |
324429d7 | 823 | } else { |
b8fd1f41 HJ |
824 | if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) || |
825 | (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) { | |
358961d1 HJ |
826 | memcpy(chcr_req->key_ctx.key, ablkctx->key, |
827 | ablkctx->enckey_len); | |
324429d7 | 828 | } else { |
358961d1 | 829 | memcpy(chcr_req->key_ctx.key, ablkctx->key + |
324429d7 HS |
830 | (ablkctx->enckey_len >> 1), |
831 | ablkctx->enckey_len >> 1); | |
358961d1 | 832 | memcpy(chcr_req->key_ctx.key + |
324429d7 HS |
833 | (ablkctx->enckey_len >> 1), |
834 | ablkctx->key, | |
835 | ablkctx->enckey_len >> 1); | |
836 | } | |
837 | } | |
358961d1 | 838 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
2f47d580 HJ |
839 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
840 | chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam); | |
841 | chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid); | |
324429d7 | 842 | |
ee0863ba | 843 | atomic_inc(&adap->chcr_stats.cipher_rqst); |
2f47d580 HJ |
844 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len |
845 | +(reqctx->imm ? (IV + wrparam->bytes) : 0); | |
846 | create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0, | |
847 | transhdr_len, temp, | |
2512a624 | 848 | ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); |
5c86a8ff | 849 | reqctx->skb = skb; |
324429d7 | 850 | return skb; |
b8fd1f41 HJ |
851 | err: |
852 | return ERR_PTR(error); | |
853 | } | |
854 | ||
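/* Map an AES key length to the hardware key-context cipher-key-size
 * encoding (0 for unsupported lengths).
 */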
855 | static inline int chcr_keyctx_ck_size(unsigned int keylen) | |
856 | { | |
857 | int ck_size = 0; | |
858 | ||
859 | if (keylen == AES_KEYSIZE_128) | |
860 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
861 | else if (keylen == AES_KEYSIZE_192) | |
862 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
863 | else if (keylen == AES_KEYSIZE_256) | |
864 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
865 | else | |
866 | ck_size = 0; | |
867 | ||
868 | return ck_size; | |
869 | } | |
870 | static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher, | |
871 | const u8 *key, | |
872 | unsigned int keylen) | |
873 | { | |
874 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | |
2f47d580 | 875 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
b8fd1f41 HJ |
876 | int err = 0; |
877 | ||
878 | crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | |
879 | crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & | |
880 | CRYPTO_TFM_REQ_MASK); | |
881 | err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen); | |
882 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | |
883 | tfm->crt_flags |= | |
884 | crypto_skcipher_get_flags(ablkctx->sw_cipher) & | |
885 | CRYPTO_TFM_RES_MASK; | |
886 | return err; | |
324429d7 HS |
887 | } |
888 | ||
b8fd1f41 HJ |
889 | static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher, |
890 | const u8 *key, | |
324429d7 HS |
891 | unsigned int keylen) |
892 | { | |
2f47d580 | 893 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
324429d7 HS |
894 | unsigned int ck_size, context_size; |
895 | u16 alignment = 0; | |
b8fd1f41 | 896 | int err; |
324429d7 | 897 | |
b8fd1f41 HJ |
898 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); |
899 | if (err) | |
324429d7 | 900 | goto badkey_err; |
b8fd1f41 HJ |
901 | |
902 | ck_size = chcr_keyctx_ck_size(keylen); | |
903 | alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0; | |
cc1b156d HJ |
904 | memcpy(ablkctx->key, key, keylen); |
905 | ablkctx->enckey_len = keylen; | |
906 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3); | |
324429d7 HS |
907 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + |
908 | keylen + alignment) >> 4; | |
909 | ||
910 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, | |
911 | 0, 0, context_size); | |
912 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; | |
913 | return 0; | |
914 | badkey_err: | |
b8fd1f41 | 915 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
324429d7 | 916 | ablkctx->enckey_len = 0; |
b8fd1f41 HJ |
917 | |
918 | return err; | |
324429d7 HS |
919 | } |
920 | ||
b8fd1f41 HJ |
921 | static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher, |
922 | const u8 *key, | |
923 | unsigned int keylen) | |
324429d7 | 924 | { |
2f47d580 | 925 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
b8fd1f41 HJ |
926 | unsigned int ck_size, context_size; |
927 | u16 alignment = 0; | |
928 | int err; | |
929 | ||
930 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); | |
931 | if (err) | |
932 | goto badkey_err; | |
933 | ck_size = chcr_keyctx_ck_size(keylen); | |
934 | alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0; | |
935 | memcpy(ablkctx->key, key, keylen); | |
936 | ablkctx->enckey_len = keylen; | |
937 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + | |
938 | keylen + alignment) >> 4; | |
939 | ||
940 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, | |
941 | 0, 0, context_size); | |
942 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; | |
943 | ||
944 | return 0; | |
945 | badkey_err: | |
946 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
947 | ablkctx->enckey_len = 0; | |
948 | ||
949 | return err; | |
950 | } | |
951 | ||
952 | static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher, | |
953 | const u8 *key, | |
954 | unsigned int keylen) | |
955 | { | |
2f47d580 | 956 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
b8fd1f41 HJ |
957 | unsigned int ck_size, context_size; |
958 | u16 alignment = 0; | |
959 | int err; | |
960 | ||
961 | if (keylen < CTR_RFC3686_NONCE_SIZE) | |
962 | return -EINVAL; | |
963 | memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), | |
964 | CTR_RFC3686_NONCE_SIZE); | |
965 | ||
966 | keylen -= CTR_RFC3686_NONCE_SIZE; | |
967 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); | |
968 | if (err) | |
969 | goto badkey_err; | |
970 | ||
971 | ck_size = chcr_keyctx_ck_size(keylen); | |
972 | alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0; | |
973 | memcpy(ablkctx->key, key, keylen); | |
974 | ablkctx->enckey_len = keylen; | |
975 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + | |
976 | keylen + alignment) >> 4; | |
977 | ||
978 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, | |
979 | 0, 0, context_size); | |
980 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; | |
981 | ||
982 | return 0; | |
983 | badkey_err: | |
984 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
985 | ablkctx->enckey_len = 0; | |
986 | ||
987 | return err; | |
988 | } | |
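/* Copy @srciv to @dstiv and add @add to its big-endian counter, propagating
 * any carry across the 128-bit block.
 */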
989 | static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add) | |
990 | { | |
991 | unsigned int size = AES_BLOCK_SIZE; | |
992 | __be32 *b = (__be32 *)(dstiv + size); | |
993 | u32 c, prev; | |
994 | ||
995 | memcpy(dstiv, srciv, AES_BLOCK_SIZE); | |
996 | for (; size >= 4; size -= 4) { | |
997 | prev = be32_to_cpu(*--b); | |
998 | c = prev + add; | |
999 | *b = cpu_to_be32(c); | |
1000 | if (prev < c) | |
1001 | break; | |
1002 | add = 1; | |
1003 | } | |
1004 | ||
1005 | } | |
1006 | ||
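/* Clamp @bytes so that the 32-bit counter at the end of @iv does not wrap
 * within this chunk.
 */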
1007 | static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes) | |
1008 | { | |
1009 | __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE); | |
1010 | u64 c; | |
1011 | u32 temp = be32_to_cpu(*--b); | |
1012 | ||
1013 | temp = ~temp; | |
1014 | c = (u64)temp + 1; // number of blocks that can be processed without overflow |
1015 | if ((bytes / AES_BLOCK_SIZE) > c) | |
1016 | bytes = c * AES_BLOCK_SIZE; | |
1017 | return bytes; | |
1018 | } | |
1019 | ||
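/* Recompute the XTS tweak for the next chunk: multiply the IV by x in
 * GF(2^128) once per block already processed (x^8 per group of eight
 * blocks).
 */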
209897d5 HJ |
1020 | static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv, |
1021 | u32 isfinal) | |
b8fd1f41 HJ |
1022 | { |
1023 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
2f47d580 | 1024 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
b8fd1f41 HJ |
1025 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
1026 | struct crypto_cipher *cipher; | |
1027 | int ret, i; | |
1028 | u8 *key; | |
1029 | unsigned int keylen; | |
de1a00ac HJ |
1030 | int round = reqctx->last_req_len / AES_BLOCK_SIZE; |
1031 | int round8 = round / 8; | |
b8fd1f41 | 1032 | |
d3f1d2f7 | 1033 | cipher = ablkctx->aes_generic; |
de1a00ac | 1034 | memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); |
b8fd1f41 | 1035 | |
b8fd1f41 HJ |
1036 | keylen = ablkctx->enckey_len / 2; |
1037 | key = ablkctx->key + keylen; | |
1038 | ret = crypto_cipher_setkey(cipher, key, keylen); | |
1039 | if (ret) | |
d3f1d2f7 | 1040 | goto out; |
2f47d580 | 1041 | /* H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0 */ |
de1a00ac HJ |
1042 | for (i = 0; i < round8; i++) |
1043 | gf128mul_x8_ble((le128 *)iv, (le128 *)iv); | |
1044 | ||
1045 | for (i = 0; i < (round % 8); i++) | |
b8fd1f41 HJ |
1046 | gf128mul_x_ble((le128 *)iv, (le128 *)iv); |
1047 | ||
209897d5 HJ |
1048 | if (!isfinal) |
1049 | crypto_cipher_decrypt_one(cipher, iv, iv); | |
b8fd1f41 HJ |
1050 | out: |
1051 | return ret; | |
1052 | } | |
1053 | ||
1054 | static int chcr_update_cipher_iv(struct ablkcipher_request *req, | |
1055 | struct cpl_fw6_pld *fw6_pld, u8 *iv) | |
1056 | { | |
1057 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
1058 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
1059 | int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)); | |
ab677ff4 | 1060 | int ret = 0; |
324429d7 | 1061 | |
b8fd1f41 HJ |
1062 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) |
1063 | ctr_add_iv(iv, req->info, (reqctx->processed / | |
1064 | AES_BLOCK_SIZE)); | |
1065 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) | |
1066 | *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + | |
1067 | CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed / | |
1068 | AES_BLOCK_SIZE) + 1); | |
1069 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) | |
209897d5 | 1070 | ret = chcr_update_tweak(req, iv, 0); |
b8fd1f41 HJ |
1071 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { |
1072 | if (reqctx->op) | |
1073 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, | |
1074 | 16, | |
1075 | reqctx->processed - AES_BLOCK_SIZE); | |
1076 | else | |
1077 | memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); | |
1078 | } | |
1079 | ||
324429d7 | 1080 | return ret; |
b8fd1f41 | 1081 | |
324429d7 HS |
1082 | } |
1083 | ||
b8fd1f41 HJ |
1084 | /* We need separate function for final iv because in rfc3686 Initial counter |
1085 | * starts from 1 and buffer size of iv is 8 byte only which remains constant | |
1086 | * for subsequent update requests | |
1087 | */ | |
1088 | ||
1089 | static int chcr_final_cipher_iv(struct ablkcipher_request *req, | |
1090 | struct cpl_fw6_pld *fw6_pld, u8 *iv) | |
1091 | { | |
1092 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
1093 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
1094 | int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)); | |
1095 | int ret = 0; | |
1096 | ||
1097 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) | |
1098 | ctr_add_iv(iv, req->info, (reqctx->processed / | |
1099 | AES_BLOCK_SIZE)); | |
1100 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) | |
209897d5 | 1101 | ret = chcr_update_tweak(req, iv, 1); |
b8fd1f41 HJ |
1102 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { |
1103 | if (reqctx->op) | |
1104 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, | |
1105 | 16, | |
1106 | reqctx->processed - AES_BLOCK_SIZE); | |
1107 | else | |
1108 | memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); | |
1109 | ||
1110 | } | |
1111 | return ret; | |
1112 | ||
1113 | } | |
1114 | ||
b8fd1f41 HJ |
1115 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, |
1116 | unsigned char *input, int err) | |
324429d7 HS |
1117 | { |
1118 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
2f47d580 HJ |
1119 | struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); |
1120 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); | |
324429d7 | 1121 | struct sk_buff *skb; |
b8fd1f41 HJ |
1122 | struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input; |
1123 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
1124 | struct cipher_wr_param wrparam; | |
1125 | int bytes; | |
1126 | ||
b8fd1f41 | 1127 | if (err) |
2f47d580 | 1128 | goto unmap; |
b8fd1f41 | 1129 | if (req->nbytes == reqctx->processed) { |
2f47d580 HJ |
1130 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, |
1131 | req); | |
b8fd1f41 HJ |
1132 | err = chcr_final_cipher_iv(req, fw6_pld, req->info); |
1133 | goto complete; | |
1134 | } | |
1135 | ||
1136 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
2f47d580 | 1137 | c_ctx(tfm)->tx_qidx))) { |
b8fd1f41 HJ |
1138 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { |
1139 | err = -EBUSY; | |
2f47d580 | 1140 | goto unmap; |
b8fd1f41 HJ |
1141 | } |
1142 | ||
1143 | } | |
2f47d580 HJ |
1144 | if (!reqctx->imm) { |
1145 | bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, | |
1146 | SPACE_LEFT(ablkctx->enckey_len), | |
1147 | reqctx->src_ofst, reqctx->dst_ofst); | |
db6deea4 HJ |
1148 | if ((bytes + reqctx->processed) >= req->nbytes) |
1149 | bytes = req->nbytes - reqctx->processed; | |
1150 | else | |
125d01ca | 1151 | bytes = rounddown(bytes, 16); |
2f47d580 HJ |
1152 | } else { |
1153 | /* CTR mode counter overflow */ |
1154 | bytes = req->nbytes - reqctx->processed; | |
1155 | } | |
1156 | dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, | |
1157 | reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); | |
b8fd1f41 | 1158 | err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); |
2f47d580 HJ |
1159 | dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, |
1160 | reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); | |
b8fd1f41 | 1161 | if (err) |
2f47d580 | 1162 | goto unmap; |
b8fd1f41 HJ |
1163 | |
1164 | if (unlikely(bytes == 0)) { | |
2f47d580 HJ |
1165 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, |
1166 | req); | |
b8fd1f41 HJ |
1167 | err = chcr_cipher_fallback(ablkctx->sw_cipher, |
1168 | req->base.flags, | |
2f47d580 HJ |
1169 | req->src, |
1170 | req->dst, | |
1171 | req->nbytes, | |
1172 | req->info, | |
b8fd1f41 HJ |
1173 | reqctx->op); |
1174 | goto complete; | |
1175 | } | |
1176 | ||
1177 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | |
1178 | CRYPTO_ALG_SUB_TYPE_CTR) | |
1179 | bytes = adjust_ctr_overflow(reqctx->iv, bytes); | |
2f47d580 | 1180 | wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx]; |
b8fd1f41 HJ |
1181 | wrparam.req = req; |
1182 | wrparam.bytes = bytes; | |
1183 | skb = create_cipher_wr(&wrparam); | |
1184 | if (IS_ERR(skb)) { | |
1185 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); | |
1186 | err = PTR_ERR(skb); | |
2f47d580 | 1187 | goto unmap; |
b8fd1f41 HJ |
1188 | } |
1189 | skb->dev = u_ctx->lldi.ports[0]; | |
2f47d580 | 1190 | set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); |
b8fd1f41 | 1191 | chcr_send_wr(skb); |
2f47d580 HJ |
1192 | reqctx->last_req_len = bytes; |
1193 | reqctx->processed += bytes; | |
b8fd1f41 | 1194 | return 0; |
2f47d580 HJ |
1195 | unmap: |
1196 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); | |
b8fd1f41 HJ |
1197 | complete: |
1198 | req->base.complete(&req->base, err); | |
1199 | return err; | |
1200 | } | |
1201 | ||
1202 | static int process_cipher(struct ablkcipher_request *req, | |
1203 | unsigned short qid, | |
1204 | struct sk_buff **skb, | |
1205 | unsigned short op_type) | |
1206 | { | |
1207 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
1208 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); | |
1209 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
2f47d580 | 1210 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
b8fd1f41 | 1211 | struct cipher_wr_param wrparam; |
2956f36c | 1212 | int bytes, err = -EINVAL; |
b8fd1f41 | 1213 | |
b8fd1f41 HJ |
1214 | reqctx->processed = 0; |
1215 | if (!req->info) | |
1216 | goto error; | |
1217 | if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || | |
1218 | (req->nbytes == 0) || | |
1219 | (req->nbytes % crypto_ablkcipher_blocksize(tfm))) { | |
1220 | pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n", | |
1221 | ablkctx->enckey_len, req->nbytes, ivsize); | |
1222 | goto error; | |
1223 | } | |
2f47d580 HJ |
1224 | chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); |
1225 | if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) + | |
1226 | AES_MIN_KEY_SIZE + | |
1227 | sizeof(struct cpl_rx_phys_dsgl) + | |
1228 | /*Min dsgl size*/ | |
1229 | 32))) { | |
1230 | /* Can be sent as Imm*/ | |
1231 | unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len; | |
1232 | ||
1233 | dnents = sg_nents_xlen(req->dst, req->nbytes, | |
1234 | CHCR_DST_SG_SIZE, 0); | |
1235 | dnents += 1; // IV | |
1236 | phys_dsgl = get_space_for_phys_dsgl(dnents); | |
125d01ca | 1237 | kctx_len = roundup(ablkctx->enckey_len, 16); |
2f47d580 HJ |
1238 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); |
1239 | reqctx->imm = (transhdr_len + IV + req->nbytes) <= | |
1240 | SGE_MAX_WR_LEN; | |
1241 | bytes = IV + req->nbytes; | |
1242 | ||
1243 | } else { | |
1244 | reqctx->imm = 0; | |
1245 | } | |
1246 | ||
1247 | if (!reqctx->imm) { | |
1248 | bytes = chcr_sg_ent_in_wr(req->src, req->dst, | |
1249 | MIN_CIPHER_SG, | |
1250 | SPACE_LEFT(ablkctx->enckey_len), | |
1251 | 0, 0); | |
db6deea4 HJ |
1252 | if ((bytes + reqctx->processed) >= req->nbytes) |
1253 | bytes = req->nbytes - reqctx->processed; | |
1254 | else | |
125d01ca | 1255 | bytes = rounddown(bytes, 16); |
2f47d580 | 1256 | } else { |
b8fd1f41 | 1257 | bytes = req->nbytes; |
2f47d580 | 1258 | } |
b8fd1f41 | 1259 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == |
db6deea4 | 1260 | CRYPTO_ALG_SUB_TYPE_CTR) { |
b8fd1f41 HJ |
1261 | bytes = adjust_ctr_overflow(req->info, bytes); |
1262 | } | |
1263 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | |
1264 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) { | |
1265 | memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE); | |
1266 | memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info, | |
1267 | CTR_RFC3686_IV_SIZE); | |
1268 | ||
1269 | /* initialize counter portion of counter block */ | |
1270 | *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + | |
1271 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); | |
1272 | ||
1273 | } else { | |
1274 | ||
2f47d580 | 1275 | memcpy(reqctx->iv, req->info, IV); |
b8fd1f41 HJ |
1276 | } |
1277 | if (unlikely(bytes == 0)) { | |
2f47d580 HJ |
1278 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, |
1279 | req); | |
b8fd1f41 HJ |
1280 | err = chcr_cipher_fallback(ablkctx->sw_cipher, |
1281 | req->base.flags, | |
1282 | req->src, | |
1283 | req->dst, | |
1284 | req->nbytes, | |
1285 | req->info, | |
1286 | op_type); | |
1287 | goto error; | |
1288 | } | |
b8fd1f41 | 1289 | reqctx->op = op_type; |
2f47d580 HJ |
1290 | reqctx->srcsg = req->src; |
1291 | reqctx->dstsg = req->dst; | |
1292 | reqctx->src_ofst = 0; | |
1293 | reqctx->dst_ofst = 0; | |
b8fd1f41 HJ |
1294 | wrparam.qid = qid; |
1295 | wrparam.req = req; | |
1296 | wrparam.bytes = bytes; | |
1297 | *skb = create_cipher_wr(&wrparam); | |
1298 | if (IS_ERR(*skb)) { | |
1299 | err = PTR_ERR(*skb); | |
2f47d580 | 1300 | goto unmap; |
b8fd1f41 | 1301 | } |
2f47d580 HJ |
1302 | reqctx->processed = bytes; |
1303 | reqctx->last_req_len = bytes; | |
b8fd1f41 HJ |
1304 | |
1305 | return 0; | |
2f47d580 HJ |
1306 | unmap: |
1307 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); | |
b8fd1f41 HJ |
1308 | error: |
1309 | return err; | |
1310 | } | |
1311 | ||
1312 | static int chcr_aes_encrypt(struct ablkcipher_request *req) | |
1313 | { | |
1314 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
b8fd1f41 HJ |
1315 | struct sk_buff *skb = NULL; |
1316 | int err; | |
2f47d580 | 1317 | struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); |
324429d7 HS |
1318 | |
1319 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
2f47d580 | 1320 | c_ctx(tfm)->tx_qidx))) { |
324429d7 HS |
1321 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1322 | return -EBUSY; | |
1323 | } | |
1324 | ||
2f47d580 HJ |
1325 | err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], |
1326 | &skb, CHCR_ENCRYPT_OP); | |
b8fd1f41 HJ |
1327 | if (err || !skb) |
1328 | return err; | |
324429d7 | 1329 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1330 | set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); |
324429d7 HS |
1331 | chcr_send_wr(skb); |
1332 | return -EINPROGRESS; | |
1333 | } | |
1334 | ||
1335 | static int chcr_aes_decrypt(struct ablkcipher_request *req) | |
1336 | { | |
1337 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
2f47d580 | 1338 | struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); |
b8fd1f41 HJ |
1339 | struct sk_buff *skb = NULL; |
1340 | int err; | |
324429d7 HS |
1341 | |
1342 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
2f47d580 | 1343 | c_ctx(tfm)->tx_qidx))) { |
324429d7 HS |
1344 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1345 | return -EBUSY; | |
1346 | } | |
1347 | ||
2f47d580 HJ |
1348 | err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], |
1349 | &skb, CHCR_DECRYPT_OP); | |
b8fd1f41 HJ |
1350 | if (err || !skb) |
1351 | return err; | |
324429d7 | 1352 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1353 | set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); |
324429d7 HS |
1354 | chcr_send_wr(skb); |
1355 | return -EINPROGRESS; | |
1356 | } | |
1357 | ||
1358 | static int chcr_device_init(struct chcr_context *ctx) | |
1359 | { | |
14c19b17 | 1360 | struct uld_ctx *u_ctx = NULL; |
72a56ca9 | 1361 | struct adapter *adap; |
324429d7 | 1362 | unsigned int id; |
72a56ca9 | 1363 | int txq_perchan, txq_idx, ntxq; |
324429d7 HS |
1364 | int err = 0, rxq_perchan, rxq_idx; |
1365 | ||
1366 | id = smp_processor_id(); | |
1367 | if (!ctx->dev) { | |
14c19b17 HJ |
1368 | u_ctx = assign_chcr_device(); |
1369 | if (!u_ctx) { | |
324429d7 HS |
1370 | pr_err("chcr device assignment failed\n"); |
1371 | goto out; | |
1372 | } | |
14c19b17 | 1373 | ctx->dev = u_ctx->dev; |
72a56ca9 HJ |
1374 | adap = padap(ctx->dev); |
1375 | ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq, | |
1376 | adap->vres.ncrypto_fc); | |
324429d7 | 1377 | rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; |
72a56ca9 | 1378 | txq_perchan = ntxq / u_ctx->lldi.nchan; |
324429d7 HS |
1379 | rxq_idx = ctx->dev->tx_channel_id * rxq_perchan; |
1380 | rxq_idx += id % rxq_perchan; | |
72a56ca9 HJ |
1381 | txq_idx = ctx->dev->tx_channel_id * txq_perchan; |
1382 | txq_idx += id % txq_perchan; | |
324429d7 | 1383 | spin_lock(&ctx->dev->lock_chcr_dev); |
72a56ca9 HJ |
1384 | ctx->rx_qidx = rxq_idx; |
1385 | ctx->tx_qidx = txq_idx; | |
ab677ff4 | 1386 | ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id; |
8a13449f | 1387 | ctx->dev->rx_channel_id = 0; |
324429d7 HS |
1388 | spin_unlock(&ctx->dev->lock_chcr_dev); |
1389 | } | |
1390 | out: | |
1391 | return err; | |
1392 | } | |
1393 | ||
1394 | static int chcr_cra_init(struct crypto_tfm *tfm) | |
1395 | { | |
b8fd1f41 HJ |
1396 | struct crypto_alg *alg = tfm->__crt_alg; |
1397 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
1398 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | |
1399 | ||
1400 | ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0, | |
1401 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | |
1402 | if (IS_ERR(ablkctx->sw_cipher)) { | |
1403 | pr_err("failed to allocate fallback for %s\n", alg->cra_name); | |
1404 | return PTR_ERR(ablkctx->sw_cipher); | |
1405 | } | |
d3f1d2f7 HJ |
1406 | |
1407 | if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) { | |
1408 | /* To update tweak*/ | |
1409 | ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0); | |
1410 | if (IS_ERR(ablkctx->aes_generic)) { | |
1411 | pr_err("failed to allocate aes cipher for tweak\n"); | |
1412 | return PTR_ERR(ablkctx->aes_generic); | |
1413 | } | |
1414 | } else | |
1415 | ablkctx->aes_generic = NULL; | |
1416 | ||
324429d7 HS |
1417 | tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); |
1418 | return chcr_device_init(crypto_tfm_ctx(tfm)); | |
1419 | } | |
1420 | ||
b8fd1f41 HJ |
1421 | static int chcr_rfc3686_init(struct crypto_tfm *tfm) |
1422 | { | |
1423 | struct crypto_alg *alg = tfm->__crt_alg; | |
1424 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
1425 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | |
1426 | ||
1427 | /* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes)) |
1428 | * cannot be used as the fallback in chcr_handle_cipher_resp |
1429 | */ | |
1430 | ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0, | |
1431 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | |
1432 | if (IS_ERR(ablkctx->sw_cipher)) { | |
1433 | pr_err("failed to allocate fallback for %s\n", alg->cra_name); | |
1434 | return PTR_ERR(ablkctx->sw_cipher); | |
1435 | } | |
1436 | tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); | |
1437 | return chcr_device_init(crypto_tfm_ctx(tfm)); | |
1438 | } | |
1439 | ||
1440 | ||
1441 | static void chcr_cra_exit(struct crypto_tfm *tfm) | |
1442 | { | |
1443 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
1444 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | |
1445 | ||
1446 | crypto_free_skcipher(ablkctx->sw_cipher); | |
d3f1d2f7 HJ |
1447 | if (ablkctx->aes_generic) |
1448 | crypto_free_cipher(ablkctx->aes_generic); | |
b8fd1f41 HJ |
1449 | } |
1450 | ||
324429d7 HS |
1451 | static int get_alg_config(struct algo_param *params, |
1452 | unsigned int auth_size) | |
1453 | { | |
1454 | switch (auth_size) { | |
1455 | case SHA1_DIGEST_SIZE: | |
1456 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; | |
1457 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1; | |
1458 | params->result_size = SHA1_DIGEST_SIZE; | |
1459 | break; | |
1460 | case SHA224_DIGEST_SIZE: | |
1461 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; | |
1462 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224; | |
1463 | params->result_size = SHA256_DIGEST_SIZE; | |
1464 | break; | |
1465 | case SHA256_DIGEST_SIZE: | |
1466 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; | |
1467 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256; | |
1468 | params->result_size = SHA256_DIGEST_SIZE; | |
1469 | break; | |
1470 | case SHA384_DIGEST_SIZE: | |
1471 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; | |
1472 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384; | |
1473 | params->result_size = SHA512_DIGEST_SIZE; | |
1474 | break; | |
1475 | case SHA512_DIGEST_SIZE: | |
1476 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; | |
1477 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512; | |
1478 | params->result_size = SHA512_DIGEST_SIZE; | |
1479 | break; | |
1480 | default: | |
1481 | pr_err("chcr : ERROR, unsupported digest size\n"); | |
1482 | return -EINVAL; | |
1483 | } | |
1484 | return 0; | |
1485 | } | |
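/*
 * SHA-224 and SHA-384 are truncated variants of SHA-256 and SHA-512;
 * result_size above is therefore the full parent-state size the
 * hardware works on, while the digest handed back to the caller is
 * still truncated to 224/384 bits.
 */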
1486 | ||
e7922729 | 1487 | static inline void chcr_free_shash(struct crypto_shash *base_hash) |
324429d7 | 1488 | { |
e7922729 | 1489 | crypto_free_shash(base_hash); |
324429d7 HS |
1490 | } |
1491 | ||
1492 | /** | |
358961d1 | 1493 | * create_hash_wr - Create hash work request |
324429d7 HS |
1494 | * @req - Hash request base |
1495 | */ | |
358961d1 | 1496 | static struct sk_buff *create_hash_wr(struct ahash_request *req, |
2debd332 | 1497 | struct hash_wr_param *param) |
324429d7 HS |
1498 | { |
1499 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1500 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
2f47d580 | 1501 | struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm)); |
324429d7 | 1502 | struct sk_buff *skb = NULL; |
2f47d580 | 1503 | struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); |
358961d1 | 1504 | struct chcr_wr *chcr_req; |
2f47d580 HJ |
1505 | struct ulptx_sgl *ulptx; |
1506 | unsigned int nents = 0, transhdr_len, iopad_alignment = 0; | |
324429d7 | 1507 | unsigned int digestsize = crypto_ahash_digestsize(tfm); |
2f47d580 | 1508 | unsigned int kctx_len = 0, temp = 0; |
324429d7 | 1509 | u8 hash_size_in_response = 0; |
358961d1 HJ |
1510 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1511 | GFP_ATOMIC; | |
2f47d580 HJ |
1512 | struct adapter *adap = padap(h_ctx(tfm)->dev); |
1513 | int error = 0; | |
324429d7 HS |
1514 | |
1515 | iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); | |
358961d1 | 1516 | kctx_len = param->alg_prm.result_size + iopad_alignment; |
324429d7 HS |
1517 | if (param->opad_needed) |
1518 | kctx_len += param->alg_prm.result_size + iopad_alignment; | |
1519 | ||
1520 | if (req_ctx->result) | |
1521 | hash_size_in_response = digestsize; | |
1522 | else | |
1523 | hash_size_in_response = param->alg_prm.result_size; | |
1524 | transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); | |
2f47d580 HJ |
1525 | req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <= |
1526 | SGE_MAX_WR_LEN; | |
1527 | nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0); | |
1528 | nents += param->bfr_len ? 1 : 0; | |
125d01ca HJ |
1529 | transhdr_len += req_ctx->imm ? roundup((param->bfr_len + |
1530 | param->sg_len), 16) : | |
2f47d580 | 1531 | (sgl_len(nents) * 8); |
125d01ca | 1532 | transhdr_len = roundup(transhdr_len, 16); |
2f47d580 HJ |
1533 | |
1534 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); | |
324429d7 | 1535 | if (!skb) |
2f47d580 | 1536 | return ERR_PTR(-ENOMEM); |
de77b966 | 1537 | chcr_req = __skb_put_zero(skb, transhdr_len); |
324429d7 | 1538 | |
358961d1 | 1539 | chcr_req->sec_cpl.op_ivinsrtofst = |
2f47d580 | 1540 | FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0); |
358961d1 | 1541 | chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); |
324429d7 | 1542 | |
358961d1 | 1543 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
324429d7 | 1544 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0); |
358961d1 | 1545 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
324429d7 | 1546 | FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0); |
358961d1 | 1547 | chcr_req->sec_cpl.seqno_numivs = |
324429d7 | 1548 | FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode, |
358961d1 | 1549 | param->opad_needed, 0); |
324429d7 | 1550 | |
358961d1 | 1551 | chcr_req->sec_cpl.ivgen_hdrlen = |
324429d7 HS |
1552 | FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0); |
1553 | ||
358961d1 HJ |
1554 | memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash, |
1555 | param->alg_prm.result_size); | |
324429d7 HS |
1556 | |
1557 | if (param->opad_needed) | |
358961d1 HJ |
1558 | memcpy(chcr_req->key_ctx.key + |
1559 | ((param->alg_prm.result_size <= 32) ? 32 : | |
1560 | CHCR_HASH_MAX_DIGEST_SIZE), | |
324429d7 HS |
1561 | hmacctx->opad, param->alg_prm.result_size); |
1562 | ||
358961d1 | 1563 | chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, |
324429d7 HS |
1564 | param->alg_prm.mk_size, 0, |
1565 | param->opad_needed, | |
358961d1 HJ |
1566 | ((kctx_len + |
1567 | sizeof(chcr_req->key_ctx)) >> 4)); | |
1568 | chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); | |
2f47d580 HJ |
1569 | ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len + |
1570 | DUMMY_BYTES); | |
1571 | if (param->bfr_len != 0) { | |
1572 | req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev, | |
1573 | req_ctx->reqbfr, param->bfr_len, | |
1574 | DMA_TO_DEVICE); | |
1575 | if (dma_mapping_error(&u_ctx->lldi.pdev->dev, | |
1576 | req_ctx->dma_addr)) { | |
1577 | error = -ENOMEM; | |
1578 | goto err; | |
1579 | } | |
1580 | req_ctx->dma_len = param->bfr_len; | |
1581 | } else { | |
1582 | req_ctx->dma_addr = 0; | |
1583 | } | |
1584 | chcr_add_hash_src_ent(req, ulptx, param); | |
1585 | /* Request up to max WR size */ | |
1586 | temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len | |
1587 | + param->bfr_len) : 0); | |
ee0863ba | 1588 | atomic_inc(&adap->chcr_stats.digest_rqst); |
2f47d580 HJ |
1589 | create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm, |
1590 | hash_size_in_response, transhdr_len, | |
1591 | temp, 0); | |
324429d7 | 1592 | req_ctx->skb = skb; |
324429d7 | 1593 | return skb; |
2f47d580 HJ |
1594 | err: |
1595 | kfree_skb(skb); | |
1596 | return ERR_PTR(error); | |
324429d7 HS |
1597 | } |
1598 | ||
1599 | static int chcr_ahash_update(struct ahash_request *req) | |
1600 | { | |
1601 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1602 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
324429d7 HS |
1603 | struct uld_ctx *u_ctx = NULL; |
1604 | struct sk_buff *skb; | |
1605 | u8 remainder = 0, bs; | |
1606 | unsigned int nbytes = req->nbytes; | |
1607 | struct hash_wr_param params; | |
2f47d580 | 1608 | int error; |
324429d7 HS |
1609 | |
1610 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1611 | ||
2f47d580 | 1612 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
324429d7 | 1613 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
2f47d580 | 1614 | h_ctx(rtfm)->tx_qidx))) { |
324429d7 HS |
1615 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1616 | return -EBUSY; | |
1617 | } | |
1618 | ||
44fce12a HJ |
1619 | if (nbytes + req_ctx->reqlen >= bs) { |
1620 | remainder = (nbytes + req_ctx->reqlen) % bs; | |
1621 | nbytes = nbytes + req_ctx->reqlen - remainder; | |
324429d7 | 1622 | } else { |
44fce12a HJ |
1623 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr |
1624 | + req_ctx->reqlen, nbytes, 0); | |
1625 | req_ctx->reqlen += nbytes; | |
324429d7 HS |
1626 | return 0; |
1627 | } | |
2f47d580 HJ |
1628 | error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); |
1629 | if (error) | |
1630 | return -ENOMEM; | |
324429d7 HS |
1631 | params.opad_needed = 0; |
1632 | params.more = 1; | |
1633 | params.last = 0; | |
44fce12a HJ |
1634 | params.sg_len = nbytes - req_ctx->reqlen; |
1635 | params.bfr_len = req_ctx->reqlen; | |
324429d7 HS |
1636 | params.scmd1 = 0; |
1637 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); | |
1638 | req_ctx->result = 0; | |
1639 | req_ctx->data_len += params.sg_len + params.bfr_len; | |
358961d1 | 1640 | skb = create_hash_wr(req, ¶ms); |
2f47d580 HJ |
1641 | if (IS_ERR(skb)) { |
1642 | error = PTR_ERR(skb); | |
1643 | goto unmap; | |
1644 | } | |
324429d7 | 1645 | |
44fce12a | 1646 | if (remainder) { |
44fce12a | 1647 | /* Swap buffers */ |
abfa2b37 | 1648 | swap(req_ctx->reqbfr, req_ctx->skbfr); |
324429d7 | 1649 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), |
44fce12a | 1650 | req_ctx->reqbfr, remainder, req->nbytes - |
324429d7 | 1651 | remainder); |
44fce12a HJ |
1652 | } |
1653 | req_ctx->reqlen = remainder; | |
324429d7 | 1654 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1655 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
324429d7 HS |
1656 | chcr_send_wr(skb); |
1657 | ||
1658 | return -EINPROGRESS; | |
2f47d580 HJ |
1659 | unmap: |
1660 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | |
1661 | return error; | |
324429d7 HS |
1662 | } |
1663 | ||
1664 | static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) | |
1665 | { | |
1666 | memset(bfr_ptr, 0, bs); | |
1667 | *bfr_ptr = 0x80; | |
1668 | if (bs == 64) | |
1669 | *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3); | |
1670 | else | |
1671 | *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3); | |
1672 | } | |
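/*
 * The buffer built above is standard Merkle-Damgard padding for a
 * final block with no data: a 0x80 marker byte, zeroes, and the total
 * message length in bits (scmd1 << 3) stored big-endian in the last
 * 8 bytes of the block (offset 56 for 64-byte SHA-1/SHA-256 blocks,
 * offset 120 for 128-byte SHA-384/512 blocks). For example, 3 bytes
 * hashed so far end the block with 00 ... 00 18 (24 bits).
 */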
1673 | ||
1674 | static int chcr_ahash_final(struct ahash_request *req) | |
1675 | { | |
1676 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1677 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
324429d7 HS |
1678 | struct hash_wr_param params; |
1679 | struct sk_buff *skb; | |
1680 | struct uld_ctx *u_ctx = NULL; | |
1681 | u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1682 | ||
2f47d580 | 1683 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
324429d7 HS |
1684 | if (is_hmac(crypto_ahash_tfm(rtfm))) |
1685 | params.opad_needed = 1; | |
1686 | else | |
1687 | params.opad_needed = 0; | |
1688 | params.sg_len = 0; | |
1689 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); | |
1690 | req_ctx->result = 1; | |
44fce12a | 1691 | params.bfr_len = req_ctx->reqlen; |
324429d7 | 1692 | req_ctx->data_len += params.bfr_len + params.sg_len; |
44fce12a HJ |
1693 | if (req_ctx->reqlen == 0) { |
1694 | create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); | |
324429d7 HS |
1695 | params.last = 0; |
1696 | params.more = 1; | |
1697 | params.scmd1 = 0; | |
1698 | params.bfr_len = bs; | |
1699 | ||
1700 | } else { | |
1701 | params.scmd1 = req_ctx->data_len; | |
1702 | params.last = 1; | |
1703 | params.more = 0; | |
1704 | } | |
358961d1 | 1705 | skb = create_hash_wr(req, ¶ms); |
40cdbe1a YG |
1706 | if (IS_ERR(skb)) |
1707 | return PTR_ERR(skb); | |
358961d1 | 1708 | |
324429d7 | 1709 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1710 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
324429d7 HS |
1711 | chcr_send_wr(skb); |
1712 | return -EINPROGRESS; | |
1713 | } | |
1714 | ||
1715 | static int chcr_ahash_finup(struct ahash_request *req) | |
1716 | { | |
1717 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1718 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
324429d7 HS |
1719 | struct uld_ctx *u_ctx = NULL; |
1720 | struct sk_buff *skb; | |
1721 | struct hash_wr_param params; | |
1722 | u8 bs; | |
2f47d580 | 1723 | int error; |
324429d7 HS |
1724 | |
1725 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
2f47d580 | 1726 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
324429d7 HS |
1727 | |
1728 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
2f47d580 | 1729 | h_ctx(rtfm)->tx_qidx))) { |
324429d7 HS |
1730 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1731 | return -EBUSY; | |
1732 | } | |
1733 | ||
1734 | if (is_hmac(crypto_ahash_tfm(rtfm))) | |
1735 | params.opad_needed = 1; | |
1736 | else | |
1737 | params.opad_needed = 0; | |
1738 | ||
1739 | params.sg_len = req->nbytes; | |
44fce12a | 1740 | params.bfr_len = req_ctx->reqlen; |
324429d7 HS |
1741 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
1742 | req_ctx->data_len += params.bfr_len + params.sg_len; | |
1743 | req_ctx->result = 1; | |
44fce12a HJ |
1744 | if ((req_ctx->reqlen + req->nbytes) == 0) { |
1745 | create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); | |
324429d7 HS |
1746 | params.last = 0; |
1747 | params.more = 1; | |
1748 | params.scmd1 = 0; | |
1749 | params.bfr_len = bs; | |
1750 | } else { | |
1751 | params.scmd1 = req_ctx->data_len; | |
1752 | params.last = 1; | |
1753 | params.more = 0; | |
1754 | } | |
2f47d580 HJ |
1755 | error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); |
1756 | if (error) | |
1757 | return -ENOMEM; | |
324429d7 | 1758 | |
358961d1 | 1759 | skb = create_hash_wr(req, ¶ms); |
2f47d580 HJ |
1760 | if (IS_ERR(skb)) { |
1761 | error = PTR_ERR(skb); | |
1762 | goto unmap; | |
1763 | } | |
324429d7 | 1764 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1765 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
324429d7 HS |
1766 | chcr_send_wr(skb); |
1767 | ||
1768 | return -EINPROGRESS; | |
2f47d580 HJ |
1769 | unmap: |
1770 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | |
1771 | return error; | |
324429d7 HS |
1772 | } |
1773 | ||
1774 | static int chcr_ahash_digest(struct ahash_request *req) | |
1775 | { | |
1776 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1777 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
324429d7 HS |
1778 | struct uld_ctx *u_ctx = NULL; |
1779 | struct sk_buff *skb; | |
1780 | struct hash_wr_param params; | |
1781 | u8 bs; | |
2f47d580 | 1782 | int error; |
324429d7 HS |
1783 | |
1784 | rtfm->init(req); | |
1785 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1786 | ||
2f47d580 | 1787 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
324429d7 | 1788 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
2f47d580 | 1789 | h_ctx(rtfm)->tx_qidx))) { |
324429d7 HS |
1790 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1791 | return -EBUSY; | |
1792 | } | |
1793 | ||
1794 | if (is_hmac(crypto_ahash_tfm(rtfm))) | |
1795 | params.opad_needed = 1; | |
1796 | else | |
1797 | params.opad_needed = 0; | |
2f47d580 HJ |
1798 | error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); |
1799 | if (error) | |
1800 | return -ENOMEM; | |
324429d7 HS |
1801 | |
1802 | params.last = 0; | |
1803 | params.more = 0; | |
1804 | params.sg_len = req->nbytes; | |
1805 | params.bfr_len = 0; | |
1806 | params.scmd1 = 0; | |
1807 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); | |
1808 | req_ctx->result = 1; | |
1809 | req_ctx->data_len += params.bfr_len + params.sg_len; | |
1810 | ||
44fce12a HJ |
1811 | if (req->nbytes == 0) { |
1812 | create_last_hash_block(req_ctx->reqbfr, bs, 0); | |
324429d7 HS |
1813 | params.more = 1; |
1814 | params.bfr_len = bs; | |
1815 | } | |
1816 | ||
358961d1 | 1817 | skb = create_hash_wr(req, ¶ms); |
2f47d580 HJ |
1818 | if (IS_ERR(skb)) { |
1819 | error = PTR_ERR(skb); | |
1820 | goto unmap; | |
1821 | } | |
324429d7 | 1822 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1823 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
324429d7 HS |
1824 | chcr_send_wr(skb); |
1825 | return -EINPROGRESS; | |
2f47d580 HJ |
1826 | unmap: |
1827 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | |
1828 | return error; | |
324429d7 HS |
1829 | } |
1830 | ||
1831 | static int chcr_ahash_export(struct ahash_request *areq, void *out) | |
1832 | { | |
1833 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
1834 | struct chcr_ahash_req_ctx *state = out; | |
1835 | ||
44fce12a | 1836 | state->reqlen = req_ctx->reqlen; |
324429d7 | 1837 | state->data_len = req_ctx->data_len; |
2f47d580 HJ |
1838 | state->is_sg_map = 0; |
1839 | state->result = 0; | |
44fce12a | 1840 | memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); |
324429d7 HS |
1841 | memcpy(state->partial_hash, req_ctx->partial_hash, |
1842 | CHCR_HASH_MAX_DIGEST_SIZE); | |
44fce12a | 1843 | return 0; |
324429d7 HS |
1844 | } |
1845 | ||
1846 | static int chcr_ahash_import(struct ahash_request *areq, const void *in) | |
1847 | { | |
1848 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
1849 | struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in; | |
1850 | ||
44fce12a | 1851 | req_ctx->reqlen = state->reqlen; |
324429d7 | 1852 | req_ctx->data_len = state->data_len; |
44fce12a HJ |
1853 | req_ctx->reqbfr = req_ctx->bfr1; |
1854 | req_ctx->skbfr = req_ctx->bfr2; | |
2f47d580 HJ |
1855 | req_ctx->is_sg_map = 0; |
1856 | req_ctx->result = 0; | |
44fce12a | 1857 | memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); |
324429d7 HS |
1858 | memcpy(req_ctx->partial_hash, state->partial_hash, |
1859 | CHCR_HASH_MAX_DIGEST_SIZE); | |
1860 | return 0; | |
1861 | } | |
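/*
 * Illustrative usage sketch (not part of the driver; error and async
 * completion handling omitted): callers reach the two handlers above
 * through the generic ahash API, e.g.
 *
 *	u8 state[sizeof(struct chcr_ahash_req_ctx)];
 *
 *	crypto_ahash_update(req);		// hash part of the data
 *	crypto_ahash_export(req, state);	// -> chcr_ahash_export()
 *	...
 *	crypto_ahash_import(req, state);	// -> chcr_ahash_import()
 *	crypto_ahash_finup(req);		// resume from saved state
 *
 * The buffer must hold at least crypto_ahash_statesize(tfm) bytes,
 * which is expected to match sizeof(struct chcr_ahash_req_ctx) here.
 */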
1862 | ||
1863 | static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | |
1864 | unsigned int keylen) | |
1865 | { | |
2f47d580 | 1866 | struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm)); |
324429d7 HS |
1867 | unsigned int digestsize = crypto_ahash_digestsize(tfm); |
1868 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | |
1869 | unsigned int i, err = 0, updated_digestsize; | |
1870 | ||
e7922729 HJ |
1871 | SHASH_DESC_ON_STACK(shash, hmacctx->base_hash); |
1872 | ||
1873 | /* use the key to calculate the ipad and opad. ipad will be sent with the | |
324429d7 HS |
1874 | * first request's data. opad will be sent with the final hash result. |
1875 | * ipad is kept in hmacctx->ipad and opad in hmacctx->opad. | |
1876 | */ | |
e7922729 HJ |
1877 | shash->tfm = hmacctx->base_hash; |
1878 | shash->flags = crypto_shash_get_flags(hmacctx->base_hash); | |
324429d7 | 1879 | if (keylen > bs) { |
e7922729 | 1880 | err = crypto_shash_digest(shash, key, keylen, |
324429d7 HS |
1881 | hmacctx->ipad); |
1882 | if (err) | |
1883 | goto out; | |
1884 | keylen = digestsize; | |
1885 | } else { | |
1886 | memcpy(hmacctx->ipad, key, keylen); | |
1887 | } | |
1888 | memset(hmacctx->ipad + keylen, 0, bs - keylen); | |
1889 | memcpy(hmacctx->opad, hmacctx->ipad, bs); | |
1890 | ||
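/*
 * Per RFC 2104 the padded key is XORed with 0x36 bytes to form ipad
 * and with 0x5c bytes to form opad; IPAD_DATA/OPAD_DATA (defined
 * elsewhere in the driver) are assumed to be those bytes replicated
 * across an unsigned int so the loop below can XOR a word at a time.
 */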
1891 | for (i = 0; i < bs / sizeof(int); i++) { | |
1892 | *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA; | |
1893 | *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA; | |
1894 | } | |
1895 | ||
1896 | updated_digestsize = digestsize; | |
1897 | if (digestsize == SHA224_DIGEST_SIZE) | |
1898 | updated_digestsize = SHA256_DIGEST_SIZE; | |
1899 | else if (digestsize == SHA384_DIGEST_SIZE) | |
1900 | updated_digestsize = SHA512_DIGEST_SIZE; | |
e7922729 | 1901 | err = chcr_compute_partial_hash(shash, hmacctx->ipad, |
324429d7 HS |
1902 | hmacctx->ipad, digestsize); |
1903 | if (err) | |
1904 | goto out; | |
1905 | chcr_change_order(hmacctx->ipad, updated_digestsize); | |
1906 | ||
e7922729 | 1907 | err = chcr_compute_partial_hash(shash, hmacctx->opad, |
324429d7 HS |
1908 | hmacctx->opad, digestsize); |
1909 | if (err) | |
1910 | goto out; | |
1911 | chcr_change_order(hmacctx->opad, updated_digestsize); | |
1912 | out: | |
1913 | return err; | |
1914 | } | |
1915 | ||
b8fd1f41 | 1916 | static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
324429d7 HS |
1917 | unsigned int key_len) |
1918 | { | |
2f47d580 | 1919 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
324429d7 | 1920 | unsigned short context_size = 0; |
b8fd1f41 | 1921 | int err; |
324429d7 | 1922 | |
b8fd1f41 HJ |
1923 | err = chcr_cipher_fallback_setkey(cipher, key, key_len); |
1924 | if (err) | |
1925 | goto badkey_err; | |
cc1b156d HJ |
1926 | |
1927 | memcpy(ablkctx->key, key, key_len); | |
1928 | ablkctx->enckey_len = key_len; | |
1929 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2); | |
1930 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; | |
1931 | ablkctx->key_ctx_hdr = | |
1932 | FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ? | |
1933 | CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : | |
1934 | CHCR_KEYCTX_CIPHER_KEY_SIZE_256, | |
1935 | CHCR_KEYCTX_NO_KEY, 1, | |
1936 | 0, context_size); | |
1937 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; | |
1938 | return 0; | |
b8fd1f41 HJ |
1939 | badkey_err: |
1940 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
1941 | ablkctx->enckey_len = 0; | |
1942 | ||
1943 | return err; | |
324429d7 HS |
1944 | } |
1945 | ||
1946 | static int chcr_sha_init(struct ahash_request *areq) | |
1947 | { | |
1948 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
1949 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | |
1950 | int digestsize = crypto_ahash_digestsize(tfm); | |
1951 | ||
1952 | req_ctx->data_len = 0; | |
44fce12a HJ |
1953 | req_ctx->reqlen = 0; |
1954 | req_ctx->reqbfr = req_ctx->bfr1; | |
1955 | req_ctx->skbfr = req_ctx->bfr2; | |
324429d7 HS |
1956 | req_ctx->skb = NULL; |
1957 | req_ctx->result = 0; | |
2f47d580 | 1958 | req_ctx->is_sg_map = 0; |
324429d7 HS |
1959 | copy_hash_init_values(req_ctx->partial_hash, digestsize); |
1960 | return 0; | |
1961 | } | |
1962 | ||
1963 | static int chcr_sha_cra_init(struct crypto_tfm *tfm) | |
1964 | { | |
1965 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
1966 | sizeof(struct chcr_ahash_req_ctx)); | |
1967 | return chcr_device_init(crypto_tfm_ctx(tfm)); | |
1968 | } | |
1969 | ||
1970 | static int chcr_hmac_init(struct ahash_request *areq) | |
1971 | { | |
1972 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
1973 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq); | |
2f47d580 | 1974 | struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm)); |
324429d7 HS |
1975 | unsigned int digestsize = crypto_ahash_digestsize(rtfm); |
1976 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1977 | ||
1978 | chcr_sha_init(areq); | |
1979 | req_ctx->data_len = bs; | |
1980 | if (is_hmac(crypto_ahash_tfm(rtfm))) { | |
1981 | if (digestsize == SHA224_DIGEST_SIZE) | |
1982 | memcpy(req_ctx->partial_hash, hmacctx->ipad, | |
1983 | SHA256_DIGEST_SIZE); | |
1984 | else if (digestsize == SHA384_DIGEST_SIZE) | |
1985 | memcpy(req_ctx->partial_hash, hmacctx->ipad, | |
1986 | SHA512_DIGEST_SIZE); | |
1987 | else | |
1988 | memcpy(req_ctx->partial_hash, hmacctx->ipad, | |
1989 | digestsize); | |
1990 | } | |
1991 | return 0; | |
1992 | } | |
1993 | ||
1994 | static int chcr_hmac_cra_init(struct crypto_tfm *tfm) | |
1995 | { | |
1996 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
1997 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | |
1998 | unsigned int digestsize = | |
1999 | crypto_ahash_digestsize(__crypto_ahash_cast(tfm)); | |
2000 | ||
2001 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
2002 | sizeof(struct chcr_ahash_req_ctx)); | |
e7922729 HJ |
2003 | hmacctx->base_hash = chcr_alloc_shash(digestsize); |
2004 | if (IS_ERR(hmacctx->base_hash)) | |
2005 | return PTR_ERR(hmacctx->base_hash); | |
324429d7 HS |
2006 | return chcr_device_init(crypto_tfm_ctx(tfm)); |
2007 | } | |
2008 | ||
324429d7 HS |
2009 | static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) |
2010 | { | |
2011 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
2012 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | |
2013 | ||
e7922729 HJ |
2014 | if (hmacctx->base_hash) { |
2015 | chcr_free_shash(hmacctx->base_hash); | |
2016 | hmacctx->base_hash = NULL; | |
324429d7 HS |
2017 | } |
2018 | } | |
2019 | ||
2f47d580 HJ |
2020 | static int chcr_aead_common_init(struct aead_request *req, |
2021 | unsigned short op_type) | |
2debd332 | 2022 | { |
2f47d580 HJ |
2023 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
2024 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); | |
2025 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2026 | int error = -EINVAL; | |
2f47d580 | 2027 | unsigned int authsize = crypto_aead_authsize(tfm); |
2debd332 | 2028 | |
2f47d580 HJ |
2029 | /* validate key size */ |
2030 | if (aeadctx->enckey_len == 0) | |
2031 | goto err; | |
2032 | if (op_type && req->cryptlen < authsize) | |
2033 | goto err; | |
2034 | error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, | |
2035 | op_type); | |
2036 | if (error) { | |
2037 | error = -ENOMEM; | |
2038 | goto err; | |
2039 | } | |
2040 | reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen, | |
2041 | CHCR_SRC_SG_SIZE, 0); | |
2042 | reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen, | |
2043 | CHCR_SRC_SG_SIZE, req->assoclen); | |
2044 | return 0; | |
2045 | err: | |
2046 | return error; | |
2debd332 | 2047 | } |
2f47d580 HJ |
2048 | |
2049 | static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents, | |
0e93708d HJ |
2050 | int aadmax, int wrlen, |
2051 | unsigned short op_type) | |
2052 | { | |
2053 | unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); | |
2054 | ||
2055 | if (((req->cryptlen - (op_type ? authsize : 0)) == 0) || | |
2f47d580 | 2056 | dst_nents > MAX_DSGL_ENT || |
0e93708d | 2057 | (req->assoclen > aadmax) || |
2f47d580 | 2058 | (wrlen > SGE_MAX_WR_LEN)) |
0e93708d HJ |
2059 | return 1; |
2060 | return 0; | |
2061 | } | |
2debd332 | 2062 | |
0e93708d HJ |
2063 | static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) |
2064 | { | |
2065 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2f47d580 | 2066 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
0e93708d HJ |
2067 | struct aead_request *subreq = aead_request_ctx(req); |
2068 | ||
2069 | aead_request_set_tfm(subreq, aeadctx->sw_cipher); | |
2070 | aead_request_set_callback(subreq, req->base.flags, | |
2071 | req->base.complete, req->base.data); | |
2072 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, | |
2073 | req->iv); | |
2074 | aead_request_set_ad(subreq, req->assoclen); | |
2075 | return op_type ? crypto_aead_decrypt(subreq) : | |
2076 | crypto_aead_encrypt(subreq); | |
2077 | } | |
2debd332 HJ |
2078 | |
2079 | static struct sk_buff *create_authenc_wr(struct aead_request *req, | |
2080 | unsigned short qid, | |
2081 | int size, | |
2082 | unsigned short op_type) | |
2083 | { | |
2084 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2f47d580 | 2085 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
2086 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); |
2087 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2088 | struct sk_buff *skb = NULL; | |
2089 | struct chcr_wr *chcr_req; | |
2090 | struct cpl_rx_phys_dsgl *phys_cpl; | |
2f47d580 HJ |
2091 | struct ulptx_sgl *ulptx; |
2092 | unsigned int transhdr_len; | |
3d64bd67 | 2093 | unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm); |
2f47d580 | 2094 | unsigned int kctx_len = 0, dnents; |
2debd332 HJ |
2095 | unsigned int assoclen = req->assoclen; |
2096 | unsigned int authsize = crypto_aead_authsize(tfm); | |
2f47d580 | 2097 | int error = -EINVAL; |
2debd332 HJ |
2098 | int null = 0; |
2099 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | |
2100 | GFP_ATOMIC; | |
2f47d580 | 2101 | struct adapter *adap = padap(a_ctx(tfm)->dev); |
2debd332 | 2102 | |
2f47d580 HJ |
2103 | if (req->cryptlen == 0) |
2104 | return NULL; | |
2debd332 | 2105 | |
2f47d580 | 2106 | reqctx->b0_dma = 0; |
3d64bd67 HJ |
2107 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || |
2108 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { | |
2debd332 HJ |
2109 | null = 1; |
2110 | assoclen = 0; | |
2111 | } | |
2f47d580 HJ |
2112 | error = chcr_aead_common_init(req, op_type); |
2113 | if (error) | |
2114 | return ERR_PTR(error); | |
5abc8db0 HJ |
2115 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); |
2116 | dnents += sg_nents_xlen(req->dst, req->cryptlen + | |
2117 | (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE, | |
2118 | req->assoclen); | |
2119 | dnents += MIN_AUTH_SG; // For IV | |
2f47d580 HJ |
2120 | |
2121 | dst_size = get_space_for_phys_dsgl(dnents); | |
2debd332 HJ |
2122 | kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) |
2123 | - sizeof(chcr_req->key_ctx); | |
2124 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | |
2f47d580 HJ |
2125 | reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) < |
2126 | SGE_MAX_WR_LEN; | |
125d01ca HJ |
2127 | temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) |
2128 | : (sgl_len(reqctx->src_nents + reqctx->aad_nents | |
2f47d580 HJ |
2129 | + MIN_GCM_SG) * 8); |
2130 | transhdr_len += temp; | |
125d01ca | 2131 | transhdr_len = roundup(transhdr_len, 16); |
2f47d580 HJ |
2132 | |
2133 | if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, | |
2134 | transhdr_len, op_type)) { | |
ee0863ba | 2135 | atomic_inc(&adap->chcr_stats.fallback); |
2f47d580 HJ |
2136 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
2137 | op_type); | |
0e93708d HJ |
2138 | return ERR_PTR(chcr_aead_fallback(req, op_type)); |
2139 | } | |
2f47d580 | 2140 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
5fe8c711 HJ |
2141 | if (!skb) { |
2142 | error = -ENOMEM; | |
2debd332 | 2143 | goto err; |
5fe8c711 | 2144 | } |
2debd332 | 2145 | |
de77b966 | 2146 | chcr_req = __skb_put_zero(skb, transhdr_len); |
2debd332 | 2147 | |
2f47d580 | 2148 | temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; |
2debd332 HJ |
2149 | |
2150 | /* | |
2151 | * Input order is AAD, IV and Payload, where the IV is included as | |
2152 | * part of the authdata. All other fields are filled according | |
2153 | * to the hardware spec. | |
2154 | */ | |
2155 | chcr_req->sec_cpl.op_ivinsrtofst = | |
2f47d580 HJ |
2156 | FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2, |
2157 | assoclen + 1); | |
2158 | chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen); | |
2debd332 HJ |
2159 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
2160 | assoclen ? 1 : 0, assoclen, | |
2f47d580 HJ |
2161 | assoclen + IV + 1, |
2162 | (temp & 0x1F0) >> 4); | |
2debd332 | 2163 | chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( |
2f47d580 HJ |
2164 | temp & 0xF, |
2165 | null ? 0 : assoclen + IV + 1, | |
2166 | temp, temp); | |
3d64bd67 HJ |
2167 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL || |
2168 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA) | |
2169 | temp = CHCR_SCMD_CIPHER_MODE_AES_CTR; | |
2170 | else | |
2171 | temp = CHCR_SCMD_CIPHER_MODE_AES_CBC; | |
2debd332 HJ |
2172 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, |
2173 | (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, | |
3d64bd67 | 2174 | temp, |
2debd332 | 2175 | actx->auth_mode, aeadctx->hmac_ctrl, |
2f47d580 | 2176 | IV >> 1); |
2debd332 | 2177 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, |
2f47d580 | 2178 | 0, 0, dst_size); |
2debd332 HJ |
2179 | |
2180 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | |
3d64bd67 HJ |
2181 | if (op_type == CHCR_ENCRYPT_OP || |
2182 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || | |
2183 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) | |
2debd332 HJ |
2184 | memcpy(chcr_req->key_ctx.key, aeadctx->key, |
2185 | aeadctx->enckey_len); | |
2186 | else | |
2187 | memcpy(chcr_req->key_ctx.key, actx->dec_rrkey, | |
2188 | aeadctx->enckey_len); | |
2189 | ||
125d01ca HJ |
2190 | memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), |
2191 | actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16)); | |
3d64bd67 HJ |
2192 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || |
2193 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { | |
2194 | memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); | |
2195 | memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv, | |
2196 | CTR_RFC3686_IV_SIZE); | |
2197 | *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + | |
2198 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); | |
2199 | } else { | |
2200 | memcpy(reqctx->iv, req->iv, IV); | |
2201 | } | |
2debd332 | 2202 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
2f47d580 HJ |
2203 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
2204 | chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); | |
2205 | chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); | |
ee0863ba | 2206 | atomic_inc(&adap->chcr_stats.cipher_rqst); |
2f47d580 HJ |
2207 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + |
2208 | kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0); | |
2209 | create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, | |
2210 | transhdr_len, temp, 0); | |
2debd332 | 2211 | reqctx->skb = skb; |
2f47d580 | 2212 | reqctx->op = op_type; |
2debd332 HJ |
2213 | |
2214 | return skb; | |
2debd332 | 2215 | err: |
2f47d580 HJ |
2216 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
2217 | op_type); | |
2218 | ||
5fe8c711 | 2219 | return ERR_PTR(error); |
2debd332 HJ |
2220 | } |
2221 | ||
6dad4e8a AG |
2222 | int chcr_aead_dma_map(struct device *dev, |
2223 | struct aead_request *req, | |
2224 | unsigned short op_type) | |
2f47d580 HJ |
2225 | { |
2226 | int error; | |
2227 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2228 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2229 | unsigned int authsize = crypto_aead_authsize(tfm); | |
2230 | int dst_size; | |
2231 | ||
2232 | dst_size = req->assoclen + req->cryptlen + (op_type ? | |
2233 | -authsize : authsize); | |
2234 | if (!req->cryptlen || !dst_size) | |
2235 | return 0; | |
2236 | reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, | |
2237 | DMA_BIDIRECTIONAL); | |
2238 | if (dma_mapping_error(dev, reqctx->iv_dma)) | |
2239 | return -ENOMEM; | |
2240 | ||
2241 | if (req->src == req->dst) { | |
2242 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | |
2243 | DMA_BIDIRECTIONAL); | |
2244 | if (!error) | |
2245 | goto err; | |
2246 | } else { | |
2247 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | |
2248 | DMA_TO_DEVICE); | |
2249 | if (!error) | |
2250 | goto err; | |
2251 | error = dma_map_sg(dev, req->dst, sg_nents(req->dst), | |
2252 | DMA_FROM_DEVICE); | |
2253 | if (!error) { | |
2254 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2255 | DMA_TO_DEVICE); | |
2256 | goto err; | |
2257 | } | |
2258 | } | |
2259 | ||
2260 | return 0; | |
2261 | err: | |
2262 | dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); | |
2263 | return -ENOMEM; | |
2264 | } | |
2265 | ||
6dad4e8a AG |
2266 | void chcr_aead_dma_unmap(struct device *dev, |
2267 | struct aead_request *req, | |
2268 | unsigned short op_type) | |
2f47d580 HJ |
2269 | { |
2270 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2271 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2272 | unsigned int authsize = crypto_aead_authsize(tfm); | |
2273 | int dst_size; | |
2274 | ||
2275 | dst_size = req->assoclen + req->cryptlen + (op_type ? | |
2276 | -authsize : authsize); | |
2277 | if (!req->cryptlen || !dst_size) | |
2278 | return; | |
2279 | ||
2280 | dma_unmap_single(dev, reqctx->iv_dma, IV, | |
2281 | DMA_BIDIRECTIONAL); | |
2282 | if (req->src == req->dst) { | |
2283 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2284 | DMA_BIDIRECTIONAL); | |
2285 | } else { | |
2286 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2287 | DMA_TO_DEVICE); | |
2288 | dma_unmap_sg(dev, req->dst, sg_nents(req->dst), | |
2289 | DMA_FROM_DEVICE); | |
2290 | } | |
2291 | } | |
2292 | ||
6dad4e8a AG |
2293 | void chcr_add_aead_src_ent(struct aead_request *req, |
2294 | struct ulptx_sgl *ulptx, | |
2295 | unsigned int assoclen, | |
2296 | unsigned short op_type) | |
2f47d580 HJ |
2297 | { |
2298 | struct ulptx_walk ulp_walk; | |
2299 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2300 | ||
2301 | if (reqctx->imm) { | |
2302 | u8 *buf = (u8 *)ulptx; | |
2303 | ||
2304 | if (reqctx->b0_dma) { | |
2305 | memcpy(buf, reqctx->scratch_pad, reqctx->b0_len); | |
2306 | buf += reqctx->b0_len; | |
2307 | } | |
2308 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | |
2309 | buf, assoclen, 0); | |
2310 | buf += assoclen; | |
2311 | memcpy(buf, reqctx->iv, IV); | |
2312 | buf += IV; | |
2313 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | |
2314 | buf, req->cryptlen, req->assoclen); | |
2315 | } else { | |
2316 | ulptx_walk_init(&ulp_walk, ulptx); | |
2317 | if (reqctx->b0_dma) | |
2318 | ulptx_walk_add_page(&ulp_walk, reqctx->b0_len, | |
2319 | &reqctx->b0_dma); | |
2320 | ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0); | |
2321 | ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); | |
2322 | ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen, | |
2323 | req->assoclen); | |
2324 | ulptx_walk_end(&ulp_walk); | |
2325 | } | |
2326 | } | |
2327 | ||
6dad4e8a AG |
2328 | void chcr_add_aead_dst_ent(struct aead_request *req, |
2329 | struct cpl_rx_phys_dsgl *phys_cpl, | |
2330 | unsigned int assoclen, | |
2331 | unsigned short op_type, | |
2332 | unsigned short qid) | |
2f47d580 HJ |
2333 | { |
2334 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2335 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2336 | struct dsgl_walk dsgl_walk; | |
2337 | unsigned int authsize = crypto_aead_authsize(tfm); | |
2338 | u32 temp; | |
2339 | ||
2340 | dsgl_walk_init(&dsgl_walk, phys_cpl); | |
2341 | if (reqctx->b0_dma) | |
2342 | dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma); | |
2343 | dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0); | |
2344 | dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); | |
2345 | temp = req->cryptlen + (op_type ? -authsize : authsize); | |
2346 | dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen); | |
2347 | dsgl_walk_end(&dsgl_walk, qid); | |
2348 | } | |
2349 | ||
6dad4e8a AG |
2350 | void chcr_add_cipher_src_ent(struct ablkcipher_request *req, |
2351 | struct ulptx_sgl *ulptx, | |
2352 | struct cipher_wr_param *wrparam) | |
2f47d580 HJ |
2353 | { |
2354 | struct ulptx_walk ulp_walk; | |
2355 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
2356 | ||
2357 | if (reqctx->imm) { | |
2358 | u8 *buf = (u8 *)ulptx; | |
2359 | ||
2360 | memcpy(buf, reqctx->iv, IV); | |
2361 | buf += IV; | |
2362 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | |
2363 | buf, wrparam->bytes, reqctx->processed); | |
2364 | } else { | |
2365 | ulptx_walk_init(&ulp_walk, ulptx); | |
2366 | ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); | |
2367 | ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes, | |
2368 | reqctx->src_ofst); | |
2369 | reqctx->srcsg = ulp_walk.last_sg; | |
2370 | reqctx->src_ofst = ulp_walk.last_sg_len; | |
2371 | ulptx_walk_end(&ulp_walk); | |
2372 | } | |
2373 | } | |
2374 | ||
6dad4e8a AG |
2375 | void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, |
2376 | struct cpl_rx_phys_dsgl *phys_cpl, | |
2377 | struct cipher_wr_param *wrparam, | |
2378 | unsigned short qid) | |
2f47d580 HJ |
2379 | { |
2380 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
2381 | struct dsgl_walk dsgl_walk; | |
2382 | ||
2383 | dsgl_walk_init(&dsgl_walk, phys_cpl); | |
2384 | dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); | |
2385 | dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, | |
2386 | reqctx->dst_ofst); | |
2387 | reqctx->dstsg = dsgl_walk.last_sg; | |
2388 | reqctx->dst_ofst = dsgl_walk.last_sg_len; | |
2389 | ||
2390 | dsgl_walk_end(&dsgl_walk, qid); | |
2391 | } | |
2392 | ||
6dad4e8a AG |
2393 | void chcr_add_hash_src_ent(struct ahash_request *req, |
2394 | struct ulptx_sgl *ulptx, | |
2395 | struct hash_wr_param *param) | |
2f47d580 HJ |
2396 | { |
2397 | struct ulptx_walk ulp_walk; | |
2398 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); | |
2399 | ||
2400 | if (reqctx->imm) { | |
2401 | u8 *buf = (u8 *)ulptx; | |
2402 | ||
2403 | if (param->bfr_len) { | |
2404 | memcpy(buf, reqctx->reqbfr, param->bfr_len); | |
2405 | buf += param->bfr_len; | |
2406 | } | |
2407 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | |
2408 | buf, param->sg_len, 0); | |
2409 | } else { | |
2410 | ulptx_walk_init(&ulp_walk, ulptx); | |
2411 | if (param->bfr_len) | |
2412 | ulptx_walk_add_page(&ulp_walk, param->bfr_len, | |
2413 | &reqctx->dma_addr); | |
2414 | ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len, | |
db6deea4 HJ |
2415 | 0); |
2416 | ulptx_walk_end(&ulp_walk); | |
2f47d580 HJ |
2417 | } |
2418 | } | |
2419 | ||
6dad4e8a AG |
2420 | int chcr_hash_dma_map(struct device *dev, |
2421 | struct ahash_request *req) | |
2f47d580 HJ |
2422 | { |
2423 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
2424 | int error = 0; | |
2425 | ||
2426 | if (!req->nbytes) | |
2427 | return 0; | |
2428 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | |
2429 | DMA_TO_DEVICE); | |
2430 | if (!error) | |
7814f552 | 2431 | return -ENOMEM; |
2f47d580 HJ |
2432 | req_ctx->is_sg_map = 1; |
2433 | return 0; | |
2434 | } | |
2435 | ||
6dad4e8a AG |
2436 | void chcr_hash_dma_unmap(struct device *dev, |
2437 | struct ahash_request *req) | |
2f47d580 HJ |
2438 | { |
2439 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
2440 | ||
2441 | if (!req->nbytes) | |
2442 | return; | |
2443 | ||
2444 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2445 | DMA_TO_DEVICE); | |
2446 | req_ctx->is_sg_map = 0; | |
2447 | ||
2448 | } | |
2449 | ||
6dad4e8a AG |
2450 | int chcr_cipher_dma_map(struct device *dev, |
2451 | struct ablkcipher_request *req) | |
2f47d580 HJ |
2452 | { |
2453 | int error; | |
2454 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
2455 | ||
2456 | reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, | |
2457 | DMA_BIDIRECTIONAL); | |
2458 | if (dma_mapping_error(dev, reqctx->iv_dma)) | |
2459 | return -ENOMEM; | |
2460 | ||
2461 | if (req->src == req->dst) { | |
2462 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | |
2463 | DMA_BIDIRECTIONAL); | |
2464 | if (!error) | |
2465 | goto err; | |
2466 | } else { | |
2467 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | |
2468 | DMA_TO_DEVICE); | |
2469 | if (!error) | |
2470 | goto err; | |
2471 | error = dma_map_sg(dev, req->dst, sg_nents(req->dst), | |
2472 | DMA_FROM_DEVICE); | |
2473 | if (!error) { | |
2474 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2475 | DMA_TO_DEVICE); | |
2476 | goto err; | |
2477 | } | |
2478 | } | |
2479 | ||
2480 | return 0; | |
2481 | err: | |
2482 | dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); | |
2483 | return -ENOMEM; | |
2484 | } | |
6dad4e8a AG |
2485 | |
2486 | void chcr_cipher_dma_unmap(struct device *dev, | |
2487 | struct ablkcipher_request *req) | |
2f47d580 HJ |
2488 | { |
2489 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
2490 | ||
2491 | dma_unmap_single(dev, reqctx->iv_dma, IV, | |
2492 | DMA_BIDIRECTIONAL); | |
2493 | if (req->src == req->dst) { | |
2494 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2495 | DMA_BIDIRECTIONAL); | |
2496 | } else { | |
2497 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2498 | DMA_TO_DEVICE); | |
2499 | dma_unmap_sg(dev, req->dst, sg_nents(req->dst), | |
2500 | DMA_FROM_DEVICE); | |
2501 | } | |
2502 | } | |
2503 | ||
2debd332 HJ |
2504 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) |
2505 | { | |
2506 | __be32 data; | |
2507 | ||
2508 | memset(block, 0, csize); | |
2509 | block += csize; | |
2510 | ||
2511 | if (csize >= 4) | |
2512 | csize = 4; | |
2513 | else if (msglen > (unsigned int)(1 << (8 * csize))) | |
2514 | return -EOVERFLOW; | |
2515 | ||
2516 | data = cpu_to_be32(msglen); | |
2517 | memcpy(block - csize, (u8 *)&data + 4 - csize, csize); | |
2518 | ||
2519 | return 0; | |
2520 | } | |
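/*
 * set_msg_len() fills the length field of the CCM B0 block: the
 * payload length is written big-endian into the trailing bytes of the
 * csize-byte field (at most the 4 bytes a u32 provides), after
 * checking that the value actually fits when csize is smaller than 4.
 */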
2521 | ||
2522 | static void generate_b0(struct aead_request *req, | |
2523 | struct chcr_aead_ctx *aeadctx, | |
2524 | unsigned short op_type) | |
2525 | { | |
2526 | unsigned int l, lp, m; | |
2527 | int rc; | |
2528 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | |
2529 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2530 | u8 *b0 = reqctx->scratch_pad; | |
2531 | ||
2532 | m = crypto_aead_authsize(aead); | |
2533 | ||
2534 | memcpy(b0, reqctx->iv, 16); | |
2535 | ||
2536 | lp = b0[0]; | |
2537 | l = lp + 1; | |
2538 | ||
2539 | /* set m, bits 3-5 */ | |
2540 | *b0 |= (8 * ((m - 2) / 2)); | |
2541 | ||
2542 | /* set adata, bit 6, if associated data is used */ | |
2543 | if (req->assoclen) | |
2544 | *b0 |= 64; | |
2545 | rc = set_msg_len(b0 + 16 - l, | |
2546 | (op_type == CHCR_DECRYPT_OP) ? | |
2547 | req->cryptlen - m : req->cryptlen, l); | |
2548 | } | |
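/*
 * B0 flags byte as per RFC 3610: bits 0-2 carry L-1 (already present
 * from the IV copy, since iv[0] holds L'), bits 3-5 encode the tag
 * length as (M-2)/2, and bit 6 is the Adata flag set when AAD is
 * present. set_msg_len() then places the payload length in the last
 * L bytes of B0.
 */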
2549 | ||
2550 | static inline int crypto_ccm_check_iv(const u8 *iv) | |
2551 | { | |
2552 | /* 2 <= L <= 8, so 1 <= L' <= 7. */ | |
2553 | if (iv[0] < 1 || iv[0] > 7) | |
2554 | return -EINVAL; | |
2555 | ||
2556 | return 0; | |
2557 | } | |
2558 | ||
2559 | static int ccm_format_packet(struct aead_request *req, | |
2560 | struct chcr_aead_ctx *aeadctx, | |
2561 | unsigned int sub_type, | |
2562 | unsigned short op_type) | |
2563 | { | |
2564 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2565 | int rc = 0; | |
2566 | ||
2debd332 HJ |
2567 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { |
2568 | reqctx->iv[0] = 3; | |
2569 | memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3); | |
2570 | memcpy(reqctx->iv + 4, req->iv, 8); | |
2571 | memset(reqctx->iv + 12, 0, 4); | |
2572 | *((unsigned short *)(reqctx->scratch_pad + 16)) = | |
2573 | htons(req->assoclen - 8); | |
2574 | } else { | |
2575 | memcpy(reqctx->iv, req->iv, 16); | |
2576 | *((unsigned short *)(reqctx->scratch_pad + 16)) = | |
2577 | htons(req->assoclen); | |
2578 | } | |
2579 | generate_b0(req, aeadctx, op_type); | |
2580 | /* zero the ctr value */ | |
2581 | memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1); | |
2582 | return rc; | |
2583 | } | |
2584 | ||
2585 | static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, | |
2586 | unsigned int dst_size, | |
2587 | struct aead_request *req, | |
2f47d580 | 2588 | unsigned short op_type) |
2debd332 HJ |
2589 | { |
2590 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2f47d580 | 2591 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
2592 | unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; |
2593 | unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; | |
2f47d580 | 2594 | unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id; |
2debd332 HJ |
2595 | unsigned int ccm_xtra; |
2596 | unsigned char tag_offset = 0, auth_offset = 0; | |
2debd332 HJ |
2597 | unsigned int assoclen; |
2598 | ||
2599 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) | |
2600 | assoclen = req->assoclen - 8; | |
2601 | else | |
2602 | assoclen = req->assoclen; | |
2603 | ccm_xtra = CCM_B0_SIZE + | |
2604 | ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); | |
2605 | ||
2606 | auth_offset = req->cryptlen ? | |
2f47d580 | 2607 | (assoclen + IV + 1 + ccm_xtra) : 0; |
2debd332 HJ |
2608 | if (op_type == CHCR_DECRYPT_OP) { |
2609 | if (crypto_aead_authsize(tfm) != req->cryptlen) | |
2610 | tag_offset = crypto_aead_authsize(tfm); | |
2611 | else | |
2612 | auth_offset = 0; | |
2613 | } | |
2614 | ||
2615 | ||
2616 | sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id, | |
2f47d580 | 2617 | 2, assoclen + 1 + ccm_xtra); |
2debd332 | 2618 | sec_cpl->pldlen = |
2f47d580 | 2619 | htonl(assoclen + IV + req->cryptlen + ccm_xtra); |
2debd332 HJ |
2620 | /* For CCM, b0 is always present, so AAD start is always 1 */ |
2621 | sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | |
2622 | 1, assoclen + ccm_xtra, assoclen | |
2f47d580 | 2623 | + IV + 1 + ccm_xtra, 0); |
2debd332 HJ |
2624 | |
2625 | sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, | |
2626 | auth_offset, tag_offset, | |
2627 | (op_type == CHCR_ENCRYPT_OP) ? 0 : | |
2628 | crypto_aead_authsize(tfm)); | |
2629 | sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, | |
2630 | (op_type == CHCR_ENCRYPT_OP) ? 0 : 1, | |
0a7bd30c | 2631 | cipher_mode, mac_mode, |
2f47d580 | 2632 | aeadctx->hmac_ctrl, IV >> 1); |
2debd332 HJ |
2633 | |
2634 | sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, | |
2f47d580 | 2635 | 0, dst_size); |
2debd332 HJ |
2636 | } |
2637 | ||
1efb892b CIK |
2638 | static int aead_ccm_validate_input(unsigned short op_type, |
2639 | struct aead_request *req, | |
2640 | struct chcr_aead_ctx *aeadctx, | |
2641 | unsigned int sub_type) | |
2debd332 HJ |
2642 | { |
2643 | if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { | |
2644 | if (crypto_ccm_check_iv(req->iv)) { | |
2645 | pr_err("CCM: IV check fails\n"); | |
2646 | return -EINVAL; | |
2647 | } | |
2648 | } else { | |
2649 | if (req->assoclen != 16 && req->assoclen != 20) { | |
2650 | pr_err("RFC4309: Invalid AAD length %d\n", | |
2651 | req->assoclen); | |
2652 | return -EINVAL; | |
2653 | } | |
2654 | } | |
2debd332 HJ |
2655 | return 0; |
2656 | } | |
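/*
 * For RFC 4309 (CCM in IPsec ESP) the AAD is the ESP header plus the
 * 8-byte IV appended by the rfc4309 template: 8 bytes of SPI plus
 * sequence number (or 12 with extended sequence numbers) followed by
 * the 8-byte IV, hence the 16/20 byte check above.
 */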
2657 | ||
2debd332 HJ |
2658 | static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, |
2659 | unsigned short qid, | |
2660 | int size, | |
2661 | unsigned short op_type) | |
2662 | { | |
2663 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2f47d580 | 2664 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
2665 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
2666 | struct sk_buff *skb = NULL; | |
2667 | struct chcr_wr *chcr_req; | |
2668 | struct cpl_rx_phys_dsgl *phys_cpl; | |
2f47d580 HJ |
2669 | struct ulptx_sgl *ulptx; |
2670 | unsigned int transhdr_len; | |
2671 | unsigned int dst_size = 0, kctx_len, dnents, temp; | |
2672 | unsigned int sub_type, assoclen = req->assoclen; | |
2debd332 | 2673 | unsigned int authsize = crypto_aead_authsize(tfm); |
2f47d580 | 2674 | int error = -EINVAL; |
2debd332 HJ |
2675 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
2676 | GFP_ATOMIC; | |
2f47d580 | 2677 | struct adapter *adap = padap(a_ctx(tfm)->dev); |
2debd332 | 2678 | |
2f47d580 HJ |
2679 | reqctx->b0_dma = 0; |
2680 | sub_type = get_aead_subtype(tfm); | |
2681 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) | |
2682 | assoclen -= 8; | |
2f47d580 HJ |
2683 | error = chcr_aead_common_init(req, op_type); |
2684 | if (error) | |
2685 | return ERR_PTR(error); | |
0e93708d | 2686 | |
2f47d580 HJ |
2687 | |
2688 | reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0); | |
5fe8c711 HJ |
2689 | error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); |
2690 | if (error) | |
2debd332 | 2691 | goto err; |
e1a018e6 HJ |
2692 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); |
2693 | dnents += sg_nents_xlen(req->dst, req->cryptlen | |
2694 | + (op_type ? -authsize : authsize), | |
2695 | CHCR_DST_SG_SIZE, req->assoclen); | |
2696 | dnents += MIN_CCM_SG; // For IV and B0 | |
2f47d580 | 2697 | dst_size = get_space_for_phys_dsgl(dnents); |
125d01ca | 2698 | kctx_len = roundup(aeadctx->enckey_len, 16) * 2; |
2debd332 | 2699 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
2f47d580 HJ |
2700 | reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen + |
2701 | reqctx->b0_len) <= SGE_MAX_WR_LEN; | |
125d01ca HJ |
2702 | temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen + |
2703 | reqctx->b0_len, 16) : | |
2f47d580 HJ |
2704 | (sgl_len(reqctx->src_nents + reqctx->aad_nents + |
2705 | MIN_CCM_SG) * 8); | |
2706 | transhdr_len += temp; | |
125d01ca | 2707 | transhdr_len = roundup(transhdr_len, 16); |
2f47d580 HJ |
2708 | |
2709 | if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - | |
2710 | reqctx->b0_len, transhdr_len, op_type)) { | |
ee0863ba | 2711 | atomic_inc(&adap->chcr_stats.fallback); |
2f47d580 HJ |
2712 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
2713 | op_type); | |
0e93708d HJ |
2714 | return ERR_PTR(chcr_aead_fallback(req, op_type)); |
2715 | } | |
2f47d580 | 2716 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
2debd332 | 2717 | |
5fe8c711 HJ |
2718 | if (!skb) { |
2719 | error = -ENOMEM; | |
2debd332 | 2720 | goto err; |
5fe8c711 | 2721 | } |
2debd332 | 2722 | |
2f47d580 | 2723 | chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len); |
2debd332 | 2724 | |
2f47d580 | 2725 | fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type); |
2debd332 HJ |
2726 | |
2727 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | |
2728 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); | |
125d01ca HJ |
2729 | memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), |
2730 | aeadctx->key, aeadctx->enckey_len); | |
2debd332 HJ |
2731 | |
2732 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | |
2f47d580 | 2733 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
5fe8c711 HJ |
2734 | error = ccm_format_packet(req, aeadctx, sub_type, op_type); |
2735 | if (error) | |
2debd332 HJ |
2736 | goto dstmap_fail; |
2737 | ||
2f47d580 HJ |
2738 | reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, |
2739 | &reqctx->scratch_pad, reqctx->b0_len, | |
2740 | DMA_BIDIRECTIONAL); | |
2741 | if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, | |
2742 | reqctx->b0_dma)) { | |
2743 | error = -ENOMEM; | |
2debd332 | 2744 | goto dstmap_fail; |
2f47d580 HJ |
2745 | } |
2746 | ||
2747 | chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); | |
2748 | chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); | |
2debd332 | 2749 | |
ee0863ba | 2750 | atomic_inc(&adap->chcr_stats.aead_rqst); |
2f47d580 HJ |
2751 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + |
2752 | kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen + | |
2753 | reqctx->b0_len) : 0); | |
2754 | create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0, | |
2755 | transhdr_len, temp, 0); | |
2debd332 | 2756 | reqctx->skb = skb; |
2f47d580 HJ |
2757 | reqctx->op = op_type; |
2758 | ||
2debd332 HJ |
2759 | return skb; |
2760 | dstmap_fail: | |
2761 | kfree_skb(skb); | |
2debd332 | 2762 | err: |
2f47d580 | 2763 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type); |
5fe8c711 | 2764 | return ERR_PTR(error); |
2debd332 HJ |
2765 | } |
2766 | ||
2767 | static struct sk_buff *create_gcm_wr(struct aead_request *req, | |
2768 | unsigned short qid, | |
2769 | int size, | |
2770 | unsigned short op_type) | |
2771 | { | |
2772 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2f47d580 | 2773 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
2774 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
2775 | struct sk_buff *skb = NULL; | |
2776 | struct chcr_wr *chcr_req; | |
2777 | struct cpl_rx_phys_dsgl *phys_cpl; | |
2f47d580 HJ |
2778 | struct ulptx_sgl *ulptx; |
2779 | unsigned int transhdr_len, dnents = 0; | |
2780 | unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen; | |
2debd332 | 2781 | unsigned int authsize = crypto_aead_authsize(tfm); |
2f47d580 | 2782 | int error = -EINVAL; |
2debd332 HJ |
2783 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
2784 | GFP_ATOMIC; | |
2f47d580 | 2785 | struct adapter *adap = padap(a_ctx(tfm)->dev); |
2debd332 | 2786 | |
2f47d580 HJ |
2787 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) |
2788 | assoclen = req->assoclen - 8; | |
2debd332 | 2789 | |
2f47d580 | 2790 | reqctx->b0_dma = 0; |
2f47d580 | 2791 | error = chcr_aead_common_init(req, op_type); |
e1a018e6 HJ |
2792 | if (error) |
2793 | return ERR_PTR(error); | |
2794 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); | |
2795 | dnents += sg_nents_xlen(req->dst, req->cryptlen + | |
2796 | (op_type ? -authsize : authsize), | |
2f47d580 | 2797 | CHCR_DST_SG_SIZE, req->assoclen); |
e1a018e6 | 2798 | dnents += MIN_GCM_SG; // For IV |
2f47d580 | 2799 | dst_size = get_space_for_phys_dsgl(dnents); |
125d01ca | 2800 | kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE; |
2debd332 | 2801 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
2f47d580 HJ |
2802 | reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <= |
2803 | SGE_MAX_WR_LEN; | |
125d01ca HJ |
2804 | temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) : |
2805 | (sgl_len(reqctx->src_nents + | |
2806 | reqctx->aad_nents + MIN_GCM_SG) * 8); | |
2f47d580 | 2807 | transhdr_len += temp; |
125d01ca | 2808 | transhdr_len = roundup(transhdr_len, 16); |
2f47d580 HJ |
2809 | if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, |
2810 | transhdr_len, op_type)) { | |
ee0863ba | 2811 | atomic_inc(&adap->chcr_stats.fallback); |
2f47d580 HJ |
2812 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
2813 | op_type); | |
0e93708d HJ |
2814 | return ERR_PTR(chcr_aead_fallback(req, op_type)); |
2815 | } | |
2f47d580 | 2816 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
5fe8c711 HJ |
2817 | if (!skb) { |
2818 | error = -ENOMEM; | |
2debd332 | 2819 | goto err; |
5fe8c711 | 2820 | } |
2debd332 | 2821 | |
de77b966 | 2822 | chcr_req = __skb_put_zero(skb, transhdr_len); |
2debd332 | 2823 | |
2f47d580 HJ |
2824 | //Offset of tag from end |
2825 | temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; | |
2debd332 | 2826 | chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( |
2f47d580 HJ |
2827 | a_ctx(tfm)->dev->rx_channel_id, 2, |
2828 | (assoclen + 1)); | |
0e93708d | 2829 | chcr_req->sec_cpl.pldlen = |
2f47d580 | 2830 | htonl(assoclen + IV + req->cryptlen); |
2debd332 | 2831 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
d600fc8a | 2832 | assoclen ? 1 : 0, assoclen, |
2f47d580 | 2833 | assoclen + IV + 1, 0); |
e1a018e6 | 2834 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
2f47d580 HJ |
2835 | FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1, |
2836 | temp, temp); | |
e1a018e6 | 2837 | chcr_req->sec_cpl.seqno_numivs = |
2debd332 HJ |
2838 | FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == |
2839 | CHCR_ENCRYPT_OP) ? 1 : 0, | |
2840 | CHCR_SCMD_CIPHER_MODE_AES_GCM, | |
0a7bd30c | 2841 | CHCR_SCMD_AUTH_MODE_GHASH, |
2f47d580 | 2842 | aeadctx->hmac_ctrl, IV >> 1); |
2debd332 | 2843 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, |
2f47d580 | 2844 | 0, 0, dst_size); |
2debd332 HJ |
2845 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; |
2846 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); | |
125d01ca HJ |
2847 | memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), |
2848 | GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); | |
2debd332 HJ |
2849 | |
2850 | /* Prepare the 16-byte GCM IV block (layout sketch after this function): |
2851 | * S A L T | IV | 0x00000001 */ |
2852 | if (get_aead_subtype(tfm) == | |
2853 | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { | |
2854 | memcpy(reqctx->iv, aeadctx->salt, 4); | |
8f6acb7f | 2855 | memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE); |
2debd332 | 2856 | } else { |
8f6acb7f | 2857 | memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE); |
2debd332 HJ |
2858 | } |
2859 | *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01); | |
2860 | ||
2861 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | |
2f47d580 | 2862 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
2debd332 | 2863 | |
2f47d580 HJ |
2864 | chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); |
2865 | chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); | |
ee0863ba | 2866 | atomic_inc(&adap->chcr_stats.aead_rqst); |
2f47d580 HJ |
2867 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + |
2868 | kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0); | |
2869 | create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, | |
2870 | transhdr_len, temp, reqctx->verify); | |
2debd332 | 2871 | reqctx->skb = skb; |
2f47d580 | 2872 | reqctx->op = op_type; |
2debd332 HJ |
2873 | return skb; |
2874 | ||
2debd332 | 2875 | err: |
2f47d580 | 2876 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type); |
5fe8c711 | 2877 | return ERR_PTR(error); |
2debd332 HJ |
2878 | } |
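Editor's sketch (not part of the driver source): the IV assembly in create_gcm_wr() builds the standard GCM pre-counter block for a 96-bit nonce, J0 = nonce || 0x00000001. A minimal user-space illustration of the same layout follows; the helper name build_gcm_iv_block() is hypothetical, and only the salt/IV/counter layout mirrors the code above.

#include <stdint.h>
#include <string.h>

/* rfc4106(gcm(aes)): 4-byte salt | 8-byte per-request IV | 0x00000001
 * gcm(aes)         : 12-byte per-request IV              | 0x00000001
 */
static void build_gcm_iv_block(uint8_t out[16], const uint8_t *salt,
			       const uint8_t *iv, int is_rfc4106)
{
	if (is_rfc4106) {
		memcpy(out, salt, 4);
		memcpy(out + 4, iv, 8);
	} else {
		memcpy(out, iv, 12);
	}
	/* 32-bit block counter, big-endian 1, as written by htonl(0x01) above */
	out[12] = 0;
	out[13] = 0;
	out[14] = 0;
	out[15] = 1;
}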
2879 | ||
2880 | ||
2881 | ||
2882 | static int chcr_aead_cra_init(struct crypto_aead *tfm) | |
2883 | { | |
2f47d580 | 2884 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
0e93708d HJ |
2885 | struct aead_alg *alg = crypto_aead_alg(tfm); |
2886 | ||
2887 | aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, | |
5fe8c711 HJ |
2888 | CRYPTO_ALG_NEED_FALLBACK | |
2889 | CRYPTO_ALG_ASYNC); | |
0e93708d HJ |
2890 | if (IS_ERR(aeadctx->sw_cipher)) |
2891 | return PTR_ERR(aeadctx->sw_cipher); | |
2892 | crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx), | |
2893 | sizeof(struct aead_request) + | |
2894 | crypto_aead_reqsize(aeadctx->sw_cipher))); | |
2f47d580 | 2895 | return chcr_device_init(a_ctx(tfm)); |
2debd332 HJ |
2896 | } |
2897 | ||
2898 | static void chcr_aead_cra_exit(struct crypto_aead *tfm) | |
2899 | { | |
2f47d580 | 2900 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
0e93708d | 2901 | |
0e93708d | 2902 | crypto_free_aead(aeadctx->sw_cipher); |
2debd332 HJ |
2903 | } |
2904 | ||
2905 | static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, | |
2906 | unsigned int authsize) | |
2907 | { | |
2f47d580 | 2908 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
2909 | |
2910 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; | |
2911 | aeadctx->mayverify = VERIFY_HW; | |
0e93708d | 2912 | return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); |
2debd332 HJ |
2913 | } |
2914 | static int chcr_authenc_setauthsize(struct crypto_aead *tfm, | |
2915 | unsigned int authsize) | |
2916 | { | |
2f47d580 | 2917 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
2918 | u32 maxauth = crypto_aead_maxauthsize(tfm); |
2919 | ||
2920 | /* In IPsec the SHA1 authsize is 12 rather than 10, i.e. the |
2921 | * "authsize == maxauthsize / 2" rule does not hold for SHA1, so the |
2922 | * authsize == 12 check must come before authsize == (maxauth >> 1). |
2923 | */ | |
2924 | if (authsize == ICV_4) { | |
2925 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; | |
2926 | aeadctx->mayverify = VERIFY_HW; | |
2927 | } else if (authsize == ICV_6) { | |
2928 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; | |
2929 | aeadctx->mayverify = VERIFY_HW; | |
2930 | } else if (authsize == ICV_10) { | |
2931 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; | |
2932 | aeadctx->mayverify = VERIFY_HW; | |
2933 | } else if (authsize == ICV_12) { | |
2934 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | |
2935 | aeadctx->mayverify = VERIFY_HW; | |
2936 | } else if (authsize == ICV_14) { | |
2937 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; | |
2938 | aeadctx->mayverify = VERIFY_HW; | |
2939 | } else if (authsize == (maxauth >> 1)) { | |
2940 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | |
2941 | aeadctx->mayverify = VERIFY_HW; | |
2942 | } else if (authsize == maxauth) { | |
2943 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
2944 | aeadctx->mayverify = VERIFY_HW; | |
2945 | } else { | |
2946 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
2947 | aeadctx->mayverify = VERIFY_SW; | |
2948 | } | |
0e93708d | 2949 | return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); |
2debd332 HJ |
2950 | } |
2951 | ||
2952 | ||
2953 | static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) | |
2954 | { | |
2f47d580 | 2955 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
2956 | |
2957 | switch (authsize) { | |
2958 | case ICV_4: | |
2959 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; | |
2960 | aeadctx->mayverify = VERIFY_HW; | |
2961 | break; | |
2962 | case ICV_8: | |
2963 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | |
2964 | aeadctx->mayverify = VERIFY_HW; | |
2965 | break; | |
2966 | case ICV_12: | |
2967 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | |
2968 | aeadctx->mayverify = VERIFY_HW; | |
2969 | break; | |
2970 | case ICV_14: | |
2971 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; | |
2972 | aeadctx->mayverify = VERIFY_HW; | |
2973 | break; | |
2974 | case ICV_16: | |
2975 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
2976 | aeadctx->mayverify = VERIFY_HW; | |
2977 | break; | |
2978 | case ICV_13: | |
2979 | case ICV_15: | |
2980 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
2981 | aeadctx->mayverify = VERIFY_SW; | |
2982 | break; | |
2983 | default: | |
2984 | ||
2985 | crypto_tfm_set_flags((struct crypto_tfm *) tfm, | |
2986 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
2987 | return -EINVAL; | |
2988 | } | |
0e93708d | 2989 | return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); |
2debd332 HJ |
2990 | } |
2991 | ||
2992 | static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, | |
2993 | unsigned int authsize) | |
2994 | { | |
2f47d580 | 2995 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
2996 | |
2997 | switch (authsize) { | |
2998 | case ICV_8: | |
2999 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | |
3000 | aeadctx->mayverify = VERIFY_HW; | |
3001 | break; | |
3002 | case ICV_12: | |
3003 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | |
3004 | aeadctx->mayverify = VERIFY_HW; | |
3005 | break; | |
3006 | case ICV_16: | |
3007 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
3008 | aeadctx->mayverify = VERIFY_HW; | |
3009 | break; | |
3010 | default: | |
3011 | crypto_tfm_set_flags((struct crypto_tfm *)tfm, | |
3012 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
3013 | return -EINVAL; | |
3014 | } | |
0e93708d | 3015 | return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); |
2debd332 HJ |
3016 | } |
3017 | ||
3018 | static int chcr_ccm_setauthsize(struct crypto_aead *tfm, | |
3019 | unsigned int authsize) | |
3020 | { | |
2f47d580 | 3021 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
3022 | |
3023 | switch (authsize) { | |
3024 | case ICV_4: | |
3025 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; | |
3026 | aeadctx->mayverify = VERIFY_HW; | |
3027 | break; | |
3028 | case ICV_6: | |
3029 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; | |
3030 | aeadctx->mayverify = VERIFY_HW; | |
3031 | break; | |
3032 | case ICV_8: | |
3033 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | |
3034 | aeadctx->mayverify = VERIFY_HW; | |
3035 | break; | |
3036 | case ICV_10: | |
3037 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; | |
3038 | aeadctx->mayverify = VERIFY_HW; | |
3039 | break; | |
3040 | case ICV_12: | |
3041 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | |
3042 | aeadctx->mayverify = VERIFY_HW; | |
3043 | break; | |
3044 | case ICV_14: | |
3045 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; | |
3046 | aeadctx->mayverify = VERIFY_HW; | |
3047 | break; | |
3048 | case ICV_16: | |
3049 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
3050 | aeadctx->mayverify = VERIFY_HW; | |
3051 | break; | |
3052 | default: | |
3053 | crypto_tfm_set_flags((struct crypto_tfm *)tfm, | |
3054 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
3055 | return -EINVAL; | |
3056 | } | |
0e93708d | 3057 | return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); |
2debd332 HJ |
3058 | } |
3059 | ||
0e93708d | 3060 | static int chcr_ccm_common_setkey(struct crypto_aead *aead, |
2debd332 HJ |
3061 | const u8 *key, |
3062 | unsigned int keylen) | |
3063 | { | |
2f47d580 | 3064 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); |
2debd332 HJ |
3065 | unsigned char ck_size, mk_size; |
3066 | int key_ctx_size = 0; | |
3067 | ||
125d01ca | 3068 | key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2; |
2debd332 | 3069 | if (keylen == AES_KEYSIZE_128) { |
2debd332 | 3070 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; |
125d01ca | 3071 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; |
2debd332 HJ |
3072 | } else if (keylen == AES_KEYSIZE_192) { |
3073 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
3074 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; | |
3075 | } else if (keylen == AES_KEYSIZE_256) { | |
3076 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
3077 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; | |
3078 | } else { | |
3079 | crypto_tfm_set_flags((struct crypto_tfm *)aead, | |
3080 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
3081 | aeadctx->enckey_len = 0; | |
3082 | return -EINVAL; | |
3083 | } | |
3084 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0, | |
3085 | key_ctx_size >> 4); | |
0e93708d HJ |
3086 | memcpy(aeadctx->key, key, keylen); |
3087 | aeadctx->enckey_len = keylen; | |
3088 | ||
2debd332 HJ |
3089 | return 0; |
3090 | } | |
3091 | ||
0e93708d HJ |
3092 | static int chcr_aead_ccm_setkey(struct crypto_aead *aead, |
3093 | const u8 *key, | |
3094 | unsigned int keylen) | |
3095 | { | |
2f47d580 | 3096 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); |
0e93708d HJ |
3097 | int error; |
3098 | ||
3099 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | |
3100 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & | |
3101 | CRYPTO_TFM_REQ_MASK); | |
3102 | error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | |
3103 | crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); | |
3104 | crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & | |
3105 | CRYPTO_TFM_RES_MASK); | |
3106 | if (error) | |
3107 | return error; | |
3108 | return chcr_ccm_common_setkey(aead, key, keylen); | |
3109 | } | |
3110 | ||
2debd332 HJ |
3111 | static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, |
3112 | unsigned int keylen) | |
3113 | { | |
2f47d580 | 3114 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); |
4dbeae42 | 3115 | int error; |
2debd332 HJ |
3116 | |
3117 | if (keylen < 3) { | |
3118 | crypto_tfm_set_flags((struct crypto_tfm *)aead, | |
3119 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
3120 | aeadctx->enckey_len = 0; | |
3121 | return -EINVAL; | |
3122 | } | |
4dbeae42 HJ |
3123 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); |
3124 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & | |
3125 | CRYPTO_TFM_REQ_MASK); | |
3126 | error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | |
3127 | crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); | |
3128 | crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & | |
3129 | CRYPTO_TFM_RES_MASK); | |
3130 | if (error) | |
3131 | return error; | |
2debd332 HJ |
3132 | keylen -= 3; |
3133 | memcpy(aeadctx->salt, key + keylen, 3); | |
0e93708d | 3134 | return chcr_ccm_common_setkey(aead, key, keylen); |
2debd332 HJ |
3135 | } |
3136 | ||
3137 | static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |
3138 | unsigned int keylen) | |
3139 | { | |
2f47d580 | 3140 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); |
2debd332 | 3141 | struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); |
8356ea51 | 3142 | struct crypto_cipher *cipher; |
2debd332 HJ |
3143 | unsigned int ck_size; |
3144 | int ret = 0, key_ctx_size = 0; | |
3145 | ||
0e93708d HJ |
3146 | aeadctx->enckey_len = 0; |
3147 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | |
3148 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) | |
3149 | & CRYPTO_TFM_REQ_MASK); | |
3150 | ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | |
3151 | crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); | |
3152 | crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & | |
3153 | CRYPTO_TFM_RES_MASK); | |
3154 | if (ret) | |
3155 | goto out; | |
3156 | ||
7c2cf1c4 HJ |
3157 | if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 && |
3158 | keylen > 3) { | |
2debd332 HJ |
3159 | keylen -= 4; /* nonce/salt is present in the last 4 bytes */ |
3160 | memcpy(aeadctx->salt, key + keylen, 4); | |
3161 | } | |
3162 | if (keylen == AES_KEYSIZE_128) { | |
3163 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
3164 | } else if (keylen == AES_KEYSIZE_192) { | |
3165 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
3166 | } else if (keylen == AES_KEYSIZE_256) { | |
3167 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
3168 | } else { | |
3169 | crypto_tfm_set_flags((struct crypto_tfm *)aead, | |
3170 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
0e93708d | 3171 | pr_err("GCM: Invalid key length %d\n", keylen); |
2debd332 HJ |
3172 | ret = -EINVAL; |
3173 | goto out; | |
3174 | } | |
3175 | ||
3176 | memcpy(aeadctx->key, key, keylen); | |
3177 | aeadctx->enckey_len = keylen; | |
125d01ca | 3178 | key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) + |
2debd332 | 3179 | AEAD_H_SIZE; |
125d01ca | 3180 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, |
2debd332 HJ |
3181 | CHCR_KEYCTX_MAC_KEY_SIZE_128, |
3182 | 0, 0, | |
3183 | key_ctx_size >> 4); | |
8356ea51 HJ |
3184 | /* Calculate the H = CIPH(K, 0 repeated 16 times). |
3185 | * It goes into the key context (see the sketch after this function). |
2debd332 | 3186 | */ |
8356ea51 HJ |
3187 | cipher = crypto_alloc_cipher("aes-generic", 0, 0); |
3188 | if (IS_ERR(cipher)) { | |
2debd332 HJ |
3189 | aeadctx->enckey_len = 0; |
3190 | ret = -ENOMEM; | |
3191 | goto out; | |
3192 | } | |
8356ea51 HJ |
3193 | |
3194 | ret = crypto_cipher_setkey(cipher, key, keylen); | |
2debd332 HJ |
3195 | if (ret) { |
3196 | aeadctx->enckey_len = 0; | |
3197 | goto out1; | |
3198 | } | |
3199 | memset(gctx->ghash_h, 0, AEAD_H_SIZE); | |
8356ea51 | 3200 | crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h); |
2debd332 HJ |
3201 | |
3202 | out1: | |
8356ea51 | 3203 | crypto_free_cipher(cipher); |
2debd332 HJ |
3204 | out: |
3205 | return ret; | |
3206 | } | |
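Editor's sketch (not part of the driver source): the hash subkey copied into the key context by chcr_gcm_setkey() is H = CIPH_K(0^128), i.e. a single AES encryption of an all-zero block under the cipher key, which is exactly what the crypto_cipher_encrypt_one() call above computes in place. A user-space equivalent, assuming OpenSSL's legacy AES block API is available; compute_ghash_h() is a hypothetical name.

#include <string.h>
#include <openssl/aes.h>

/* Compute the GHASH hash subkey H = AES_K(0^128). */
static int compute_ghash_h(const unsigned char *key, int keylen_bytes,
			   unsigned char h[16])
{
	AES_KEY enc;

	if (AES_set_encrypt_key(key, keylen_bytes * 8, &enc) != 0)
		return -1;
	memset(h, 0, 16);
	AES_encrypt(h, h, &enc);	/* in-place, like crypto_cipher_encrypt_one() */
	return 0;
}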
3207 | ||
3208 | static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | |
3209 | unsigned int keylen) | |
3210 | { | |
2f47d580 | 3211 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc)); |
2debd332 HJ |
3212 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); |
3213 | /* the key blob contains both the auth key and the cipher key */ |
3214 | struct crypto_authenc_keys keys; | |
3d64bd67 | 3215 | unsigned int bs, subtype; |
2debd332 HJ |
3216 | unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize; |
3217 | int err = 0, i, key_ctx_len = 0; | |
3218 | unsigned char ck_size = 0; | |
3219 | unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 }; | |
ec1bca94 | 3220 | struct crypto_shash *base_hash = ERR_PTR(-EINVAL); |
2debd332 HJ |
3221 | struct algo_param param; |
3222 | int align; | |
3223 | u8 *o_ptr = NULL; | |
3224 | ||
0e93708d HJ |
3225 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); |
3226 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) | |
3227 | & CRYPTO_TFM_REQ_MASK); | |
3228 | err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | |
3229 | crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); | |
3230 | crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) | |
3231 | & CRYPTO_TFM_RES_MASK); | |
3232 | if (err) | |
3233 | goto out; | |
3234 | ||
2debd332 HJ |
3235 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { |
3236 | crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
3237 | goto out; | |
3238 | } | |
3239 | ||
3240 | if (get_alg_config(¶m, max_authsize)) { | |
3241 | pr_err("chcr : Unsupported digest size\n"); | |
3242 | goto out; | |
3243 | } | |
3d64bd67 HJ |
3244 | subtype = get_aead_subtype(authenc); |
3245 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || | |
3246 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { | |
3247 | if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) | |
3248 | goto out; | |
3249 | memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen | |
3250 | - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); | |
3251 | keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; | |
3252 | } | |
2debd332 HJ |
3253 | if (keys.enckeylen == AES_KEYSIZE_128) { |
3254 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
3255 | } else if (keys.enckeylen == AES_KEYSIZE_192) { | |
3256 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
3257 | } else if (keys.enckeylen == AES_KEYSIZE_256) { | |
3258 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
3259 | } else { | |
3260 | pr_err("chcr : Unsupported cipher key\n"); | |
3261 | goto out; | |
3262 | } | |
3263 | ||
3264 | /* Copy only the encryption key. The auth key is used here only to |
3265 | * generate h(ipad) and h(opad) (see the HMAC pad sketch after this |
3266 | * function), so it need not be stored; authkeylen is the hash digest size. |
3267 | */ | |
3268 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); | |
3269 | aeadctx->enckey_len = keys.enckeylen; | |
3d64bd67 HJ |
3270 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || |
3271 | subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { | |
2debd332 | 3272 | |
3d64bd67 HJ |
3273 | get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, |
3274 | aeadctx->enckey_len << 3); | |
3275 | } | |
2debd332 HJ |
3276 | base_hash = chcr_alloc_shash(max_authsize); |
3277 | if (IS_ERR(base_hash)) { | |
3278 | pr_err("chcr : Base driver cannot be loaded\n"); | |
0e93708d HJ |
3279 | aeadctx->enckey_len = 0; |
3280 | return -EINVAL; | |
324429d7 | 3281 | } |
2debd332 HJ |
3282 | { |
3283 | SHASH_DESC_ON_STACK(shash, base_hash); | |
3284 | shash->tfm = base_hash; | |
3285 | shash->flags = crypto_shash_get_flags(base_hash); | |
3286 | bs = crypto_shash_blocksize(base_hash); | |
3287 | align = KEYCTX_ALIGN_PAD(max_authsize); | |
3288 | o_ptr = actx->h_iopad + param.result_size + align; | |
3289 | ||
3290 | if (keys.authkeylen > bs) { | |
3291 | err = crypto_shash_digest(shash, keys.authkey, | |
3292 | keys.authkeylen, | |
3293 | o_ptr); | |
3294 | if (err) { | |
3295 | pr_err("chcr : Hashing of the auth key failed\n"); |
3296 | goto out; | |
3297 | } | |
3298 | keys.authkeylen = max_authsize; | |
3299 | } else | |
3300 | memcpy(o_ptr, keys.authkey, keys.authkeylen); | |
3301 | ||
3302 | /* Compute the ipad-digest*/ | |
3303 | memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); | |
3304 | memcpy(pad, o_ptr, keys.authkeylen); | |
3305 | for (i = 0; i < bs >> 2; i++) | |
3306 | *((unsigned int *)pad + i) ^= IPAD_DATA; | |
3307 | ||
3308 | if (chcr_compute_partial_hash(shash, pad, actx->h_iopad, | |
3309 | max_authsize)) | |
3310 | goto out; | |
3311 | /* Compute the opad-digest */ | |
3312 | memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); | |
3313 | memcpy(pad, o_ptr, keys.authkeylen); | |
3314 | for (i = 0; i < bs >> 2; i++) | |
3315 | *((unsigned int *)pad + i) ^= OPAD_DATA; | |
3316 | ||
3317 | if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize)) | |
3318 | goto out; | |
3319 | ||
3320 | /* convert the ipad and opad digest to network order */ | |
3321 | chcr_change_order(actx->h_iopad, param.result_size); | |
3322 | chcr_change_order(o_ptr, param.result_size); | |
3323 | key_ctx_len = sizeof(struct _key_ctx) + | |
125d01ca | 3324 | roundup(keys.enckeylen, 16) + |
2debd332 HJ |
3325 | (param.result_size + align) * 2; |
3326 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, | |
3327 | 0, 1, key_ctx_len >> 4); | |
3328 | actx->auth_mode = param.auth_mode; | |
3329 | chcr_free_shash(base_hash); | |
3330 | ||
3331 | return 0; | |
3332 | } | |
3333 | out: | |
3334 | aeadctx->enckey_len = 0; | |
ec1bca94 | 3335 | if (!IS_ERR(base_hash)) |
2debd332 HJ |
3336 | chcr_free_shash(base_hash); |
3337 | return -EINVAL; | |
324429d7 HS |
3338 | } |
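Editor's sketch (not part of the driver source): the pad handling in chcr_authenc_setkey() is the standard HMAC key preprocessing. The authentication key (first digested if it is longer than the hash block size) is zero-padded to the block size and XORed with the ipad/opad constants; IPAD_DATA/OPAD_DATA apply 0x36/0x5c one 32-bit word at a time, which is byte-wise equivalent. One partial-hash pass over each pad then yields h(ipad) and h(opad) for the key context. A minimal sketch of the pad derivation; hmac_make_pad() is a hypothetical helper name.

#include <stdint.h>
#include <string.h>

#define HMAC_IPAD_BYTE 0x36
#define HMAC_OPAD_BYTE 0x5c

/* Zero-pad the auth key to the hash block size bs and XOR in the pad byte.
 * keylen must already be <= bs (longer keys are digested first, as above).
 */
static void hmac_make_pad(uint8_t *pad, size_t bs, const uint8_t *key,
			  size_t keylen, uint8_t pad_byte)
{
	size_t i;

	memset(pad, 0, bs);
	memcpy(pad, key, keylen);
	for (i = 0; i < bs; i++)
		pad[i] ^= pad_byte;	/* HMAC_IPAD_BYTE or HMAC_OPAD_BYTE */
}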
3339 | ||
2debd332 HJ |
3340 | static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, |
3341 | const u8 *key, unsigned int keylen) | |
3342 | { | |
2f47d580 | 3343 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc)); |
2debd332 HJ |
3344 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); |
3345 | struct crypto_authenc_keys keys; | |
0e93708d | 3346 | int err; |
2debd332 | 3347 | /* the key blob contains both the auth key and the cipher key */ |
3d64bd67 | 3348 | unsigned int subtype; |
2debd332 HJ |
3349 | int key_ctx_len = 0; |
3350 | unsigned char ck_size = 0; | |
3351 | ||
0e93708d HJ |
3352 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); |
3353 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) | |
3354 | & CRYPTO_TFM_REQ_MASK); | |
3355 | err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | |
3356 | crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); | |
3357 | crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) | |
3358 | & CRYPTO_TFM_RES_MASK); | |
3359 | if (err) | |
3360 | goto out; | |
3361 | ||
2debd332 HJ |
3362 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { |
3363 | crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
3364 | goto out; | |
3365 | } | |
3d64bd67 HJ |
3366 | subtype = get_aead_subtype(authenc); |
3367 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || | |
3368 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { | |
3369 | if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) | |
3370 | goto out; | |
3371 | memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen | |
3372 | - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); | |
3373 | keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; | |
3374 | } | |
2debd332 HJ |
3375 | if (keys.enckeylen == AES_KEYSIZE_128) { |
3376 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
3377 | } else if (keys.enckeylen == AES_KEYSIZE_192) { | |
3378 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
3379 | } else if (keys.enckeylen == AES_KEYSIZE_256) { | |
3380 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
3381 | } else { | |
3d64bd67 | 3382 | pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen); |
2debd332 HJ |
3383 | goto out; |
3384 | } | |
3385 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); | |
3386 | aeadctx->enckey_len = keys.enckeylen; | |
3d64bd67 HJ |
3387 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || |
3388 | subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { | |
3389 | get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, | |
3390 | aeadctx->enckey_len << 3); | |
3391 | } | |
125d01ca | 3392 | key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16); |
2debd332 HJ |
3393 | |
3394 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0, | |
3395 | 0, key_ctx_len >> 4); | |
3396 | actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP; | |
3397 | return 0; | |
3398 | out: | |
3399 | aeadctx->enckey_len = 0; | |
3400 | return -EINVAL; | |
3401 | } | |
6dad4e8a AG |
3402 | |
3403 | static int chcr_aead_op(struct aead_request *req, | |
3404 | unsigned short op_type, | |
3405 | int size, | |
3406 | create_wr_t create_wr_fn) | |
3407 | { | |
3408 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
3409 | struct uld_ctx *u_ctx; | |
3410 | struct sk_buff *skb; | |
3411 | ||
3412 | if (!a_ctx(tfm)->dev) { | |
3413 | pr_err("chcr : %s : No crypto device.\n", __func__); | |
3414 | return -ENXIO; | |
3415 | } | |
3416 | u_ctx = ULD_CTX(a_ctx(tfm)); | |
3417 | if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
3418 | a_ctx(tfm)->tx_qidx)) { | |
3419 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | |
3420 | return -EBUSY; | |
3421 | } | |
3422 | ||
3423 | /* Form a WR from req */ | |
3424 | skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size, | |
3425 | op_type); | |
3426 | ||
3427 | if (IS_ERR(skb) || !skb) | |
3428 | return PTR_ERR(skb); | |
3429 | ||
3430 | skb->dev = u_ctx->lldi.ports[0]; | |
3431 | set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx); | |
3432 | chcr_send_wr(skb); | |
3433 | return -EINPROGRESS; | |
3434 | } | |
3435 | ||
2debd332 HJ |
3436 | static int chcr_aead_encrypt(struct aead_request *req) |
3437 | { | |
3438 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
3439 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
3440 | ||
3441 | reqctx->verify = VERIFY_HW; | |
3442 | ||
3443 | switch (get_aead_subtype(tfm)) { | |
3d64bd67 HJ |
3444 | case CRYPTO_ALG_SUB_TYPE_CTR_SHA: |
3445 | case CRYPTO_ALG_SUB_TYPE_CBC_SHA: | |
3446 | case CRYPTO_ALG_SUB_TYPE_CBC_NULL: | |
3447 | case CRYPTO_ALG_SUB_TYPE_CTR_NULL: | |
2debd332 HJ |
3448 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, |
3449 | create_authenc_wr); | |
3450 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: | |
3451 | case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: | |
3452 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, | |
3453 | create_aead_ccm_wr); | |
3454 | default: | |
3455 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, | |
3456 | create_gcm_wr); | |
3457 | } | |
3458 | } | |
3459 | ||
3460 | static int chcr_aead_decrypt(struct aead_request *req) | |
3461 | { | |
3462 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2f47d580 | 3463 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
3464 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
3465 | int size; | |
3466 | ||
3467 | if (aeadctx->mayverify == VERIFY_SW) { | |
3468 | size = crypto_aead_maxauthsize(tfm); | |
3469 | reqctx->verify = VERIFY_SW; | |
3470 | } else { | |
3471 | size = 0; | |
3472 | reqctx->verify = VERIFY_HW; | |
3473 | } | |
3474 | ||
3475 | switch (get_aead_subtype(tfm)) { | |
3d64bd67 HJ |
3476 | case CRYPTO_ALG_SUB_TYPE_CBC_SHA: |
3477 | case CRYPTO_ALG_SUB_TYPE_CTR_SHA: | |
3478 | case CRYPTO_ALG_SUB_TYPE_CBC_NULL: | |
3479 | case CRYPTO_ALG_SUB_TYPE_CTR_NULL: | |
2debd332 HJ |
3480 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, |
3481 | create_authenc_wr); | |
3482 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: | |
3483 | case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: | |
3484 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, | |
3485 | create_aead_ccm_wr); | |
3486 | default: | |
3487 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, | |
3488 | create_gcm_wr); | |
3489 | } | |
3490 | } | |
3491 | ||
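Editor's sketch (not part of the driver source): chcr_aead_encrypt() and chcr_aead_decrypt() above are reached through the generic kernel AEAD API once the algorithms in driver_algs[] below are registered and win the priority-based selection. A minimal in-kernel caller for "gcm(aes)" might look as follows; example_gcm_encrypt() is an illustrative name, and the 128-bit key, 16-byte tag, and single contiguous buffer are example choices.

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* One synchronous GCM encryption over a buffer laid out as
 * assoc data | plaintext | room for the 16-byte tag.
 */
static int example_gcm_encrypt(const u8 *key, u8 *buf,
			       unsigned int assoclen, unsigned int ptlen)
{
	u8 iv[GCM_AES_IV_SIZE] = { 0 };	/* example only; real callers need a unique nonce */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}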
324429d7 HS |
3492 | static struct chcr_alg_template driver_algs[] = { |
3493 | /* AES-CBC */ | |
3494 | { | |
b8fd1f41 | 3495 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC, |
324429d7 HS |
3496 | .is_registered = 0, |
3497 | .alg.crypto = { | |
3498 | .cra_name = "cbc(aes)", | |
2debd332 | 3499 | .cra_driver_name = "cbc-aes-chcr", |
324429d7 | 3500 | .cra_blocksize = AES_BLOCK_SIZE, |
324429d7 | 3501 | .cra_init = chcr_cra_init, |
b8fd1f41 | 3502 | .cra_exit = chcr_cra_exit, |
324429d7 HS |
3503 | .cra_u.ablkcipher = { |
3504 | .min_keysize = AES_MIN_KEY_SIZE, | |
3505 | .max_keysize = AES_MAX_KEY_SIZE, | |
3506 | .ivsize = AES_BLOCK_SIZE, | |
3507 | .setkey = chcr_aes_cbc_setkey, | |
3508 | .encrypt = chcr_aes_encrypt, | |
3509 | .decrypt = chcr_aes_decrypt, | |
3510 | } | |
3511 | } | |
3512 | }, | |
3513 | { | |
b8fd1f41 | 3514 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS, |
324429d7 HS |
3515 | .is_registered = 0, |
3516 | .alg.crypto = { | |
3517 | .cra_name = "xts(aes)", | |
2debd332 | 3518 | .cra_driver_name = "xts-aes-chcr", |
324429d7 | 3519 | .cra_blocksize = AES_BLOCK_SIZE, |
324429d7 HS |
3520 | .cra_init = chcr_cra_init, |
3521 | .cra_exit = NULL, | |
b8fd1f41 | 3522 | .cra_u .ablkcipher = { |
324429d7 HS |
3523 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
3524 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | |
3525 | .ivsize = AES_BLOCK_SIZE, | |
3526 | .setkey = chcr_aes_xts_setkey, | |
3527 | .encrypt = chcr_aes_encrypt, | |
3528 | .decrypt = chcr_aes_decrypt, | |
3529 | } | |
3530 | } | |
b8fd1f41 HJ |
3531 | }, |
3532 | { | |
3533 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR, | |
3534 | .is_registered = 0, | |
3535 | .alg.crypto = { | |
3536 | .cra_name = "ctr(aes)", | |
3537 | .cra_driver_name = "ctr-aes-chcr", | |
3538 | .cra_blocksize = 1, | |
3539 | .cra_init = chcr_cra_init, | |
3540 | .cra_exit = chcr_cra_exit, | |
3541 | .cra_u.ablkcipher = { | |
3542 | .min_keysize = AES_MIN_KEY_SIZE, | |
3543 | .max_keysize = AES_MAX_KEY_SIZE, | |
3544 | .ivsize = AES_BLOCK_SIZE, | |
3545 | .setkey = chcr_aes_ctr_setkey, | |
3546 | .encrypt = chcr_aes_encrypt, | |
3547 | .decrypt = chcr_aes_decrypt, | |
3548 | } | |
3549 | } | |
3550 | }, | |
3551 | { | |
3552 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
3553 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686, | |
3554 | .is_registered = 0, | |
3555 | .alg.crypto = { | |
3556 | .cra_name = "rfc3686(ctr(aes))", | |
3557 | .cra_driver_name = "rfc3686-ctr-aes-chcr", | |
3558 | .cra_blocksize = 1, | |
3559 | .cra_init = chcr_rfc3686_init, | |
3560 | .cra_exit = chcr_cra_exit, | |
3561 | .cra_u.ablkcipher = { | |
3562 | .min_keysize = AES_MIN_KEY_SIZE + | |
3563 | CTR_RFC3686_NONCE_SIZE, | |
3564 | .max_keysize = AES_MAX_KEY_SIZE + | |
3565 | CTR_RFC3686_NONCE_SIZE, | |
3566 | .ivsize = CTR_RFC3686_IV_SIZE, | |
3567 | .setkey = chcr_aes_rfc3686_setkey, | |
3568 | .encrypt = chcr_aes_encrypt, | |
3569 | .decrypt = chcr_aes_decrypt, | |
3570 | .geniv = "seqiv", | |
3571 | } | |
324429d7 HS |
3572 | } |
3573 | }, | |
3574 | /* SHA */ | |
3575 | { | |
3576 | .type = CRYPTO_ALG_TYPE_AHASH, | |
3577 | .is_registered = 0, | |
3578 | .alg.hash = { | |
3579 | .halg.digestsize = SHA1_DIGEST_SIZE, | |
3580 | .halg.base = { | |
3581 | .cra_name = "sha1", | |
3582 | .cra_driver_name = "sha1-chcr", | |
3583 | .cra_blocksize = SHA1_BLOCK_SIZE, | |
3584 | } | |
3585 | } | |
3586 | }, | |
3587 | { | |
3588 | .type = CRYPTO_ALG_TYPE_AHASH, | |
3589 | .is_registered = 0, | |
3590 | .alg.hash = { | |
3591 | .halg.digestsize = SHA256_DIGEST_SIZE, | |
3592 | .halg.base = { | |
3593 | .cra_name = "sha256", | |
3594 | .cra_driver_name = "sha256-chcr", | |
3595 | .cra_blocksize = SHA256_BLOCK_SIZE, | |
3596 | } | |
3597 | } | |
3598 | }, | |
3599 | { | |
3600 | .type = CRYPTO_ALG_TYPE_AHASH, | |
3601 | .is_registered = 0, | |
3602 | .alg.hash = { | |
3603 | .halg.digestsize = SHA224_DIGEST_SIZE, | |
3604 | .halg.base = { | |
3605 | .cra_name = "sha224", | |
3606 | .cra_driver_name = "sha224-chcr", | |
3607 | .cra_blocksize = SHA224_BLOCK_SIZE, | |
3608 | } | |
3609 | } | |
3610 | }, | |
3611 | { | |
3612 | .type = CRYPTO_ALG_TYPE_AHASH, | |
3613 | .is_registered = 0, | |
3614 | .alg.hash = { | |
3615 | .halg.digestsize = SHA384_DIGEST_SIZE, | |
3616 | .halg.base = { | |
3617 | .cra_name = "sha384", | |
3618 | .cra_driver_name = "sha384-chcr", | |
3619 | .cra_blocksize = SHA384_BLOCK_SIZE, | |
3620 | } | |
3621 | } | |
3622 | }, | |
3623 | { | |
3624 | .type = CRYPTO_ALG_TYPE_AHASH, | |
3625 | .is_registered = 0, | |
3626 | .alg.hash = { | |
3627 | .halg.digestsize = SHA512_DIGEST_SIZE, | |
3628 | .halg.base = { | |
3629 | .cra_name = "sha512", | |
3630 | .cra_driver_name = "sha512-chcr", | |
3631 | .cra_blocksize = SHA512_BLOCK_SIZE, | |
3632 | } | |
3633 | } | |
3634 | }, | |
3635 | /* HMAC */ | |
3636 | { | |
3637 | .type = CRYPTO_ALG_TYPE_HMAC, | |
3638 | .is_registered = 0, | |
3639 | .alg.hash = { | |
3640 | .halg.digestsize = SHA1_DIGEST_SIZE, | |
3641 | .halg.base = { | |
3642 | .cra_name = "hmac(sha1)", | |
2debd332 | 3643 | .cra_driver_name = "hmac-sha1-chcr", |
324429d7 HS |
3644 | .cra_blocksize = SHA1_BLOCK_SIZE, |
3645 | } | |
3646 | } | |
3647 | }, | |
3648 | { | |
3649 | .type = CRYPTO_ALG_TYPE_HMAC, | |
3650 | .is_registered = 0, | |
3651 | .alg.hash = { | |
3652 | .halg.digestsize = SHA224_DIGEST_SIZE, | |
3653 | .halg.base = { | |
3654 | .cra_name = "hmac(sha224)", | |
2debd332 | 3655 | .cra_driver_name = "hmac-sha224-chcr", |
324429d7 HS |
3656 | .cra_blocksize = SHA224_BLOCK_SIZE, |
3657 | } | |
3658 | } | |
3659 | }, | |
3660 | { | |
3661 | .type = CRYPTO_ALG_TYPE_HMAC, | |
3662 | .is_registered = 0, | |
3663 | .alg.hash = { | |
3664 | .halg.digestsize = SHA256_DIGEST_SIZE, | |
3665 | .halg.base = { | |
3666 | .cra_name = "hmac(sha256)", | |
2debd332 | 3667 | .cra_driver_name = "hmac-sha256-chcr", |
324429d7 HS |
3668 | .cra_blocksize = SHA256_BLOCK_SIZE, |
3669 | } | |
3670 | } | |
3671 | }, | |
3672 | { | |
3673 | .type = CRYPTO_ALG_TYPE_HMAC, | |
3674 | .is_registered = 0, | |
3675 | .alg.hash = { | |
3676 | .halg.digestsize = SHA384_DIGEST_SIZE, | |
3677 | .halg.base = { | |
3678 | .cra_name = "hmac(sha384)", | |
2debd332 | 3679 | .cra_driver_name = "hmac-sha384-chcr", |
324429d7 HS |
3680 | .cra_blocksize = SHA384_BLOCK_SIZE, |
3681 | } | |
3682 | } | |
3683 | }, | |
3684 | { | |
3685 | .type = CRYPTO_ALG_TYPE_HMAC, | |
3686 | .is_registered = 0, | |
3687 | .alg.hash = { | |
3688 | .halg.digestsize = SHA512_DIGEST_SIZE, | |
3689 | .halg.base = { | |
3690 | .cra_name = "hmac(sha512)", | |
2debd332 | 3691 | .cra_driver_name = "hmac-sha512-chcr", |
324429d7 HS |
3692 | .cra_blocksize = SHA512_BLOCK_SIZE, |
3693 | } | |
3694 | } | |
3695 | }, | |
2debd332 HJ |
3696 | /* Add AEAD Algorithms */ |
3697 | { | |
3698 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM, | |
3699 | .is_registered = 0, | |
3700 | .alg.aead = { | |
3701 | .base = { | |
3702 | .cra_name = "gcm(aes)", | |
3703 | .cra_driver_name = "gcm-aes-chcr", | |
3704 | .cra_blocksize = 1, | |
e29abda5 | 3705 | .cra_priority = CHCR_AEAD_PRIORITY, |
2debd332 HJ |
3706 | .cra_ctxsize = sizeof(struct chcr_context) + |
3707 | sizeof(struct chcr_aead_ctx) + | |
3708 | sizeof(struct chcr_gcm_ctx), | |
3709 | }, | |
8f6acb7f | 3710 | .ivsize = GCM_AES_IV_SIZE, |
2debd332 HJ |
3711 | .maxauthsize = GHASH_DIGEST_SIZE, |
3712 | .setkey = chcr_gcm_setkey, | |
3713 | .setauthsize = chcr_gcm_setauthsize, | |
3714 | } | |
3715 | }, | |
3716 | { | |
3717 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106, | |
3718 | .is_registered = 0, | |
3719 | .alg.aead = { | |
3720 | .base = { | |
3721 | .cra_name = "rfc4106(gcm(aes))", | |
3722 | .cra_driver_name = "rfc4106-gcm-aes-chcr", | |
3723 | .cra_blocksize = 1, | |
e29abda5 | 3724 | .cra_priority = CHCR_AEAD_PRIORITY + 1, |
2debd332 HJ |
3725 | .cra_ctxsize = sizeof(struct chcr_context) + |
3726 | sizeof(struct chcr_aead_ctx) + | |
3727 | sizeof(struct chcr_gcm_ctx), | |
3728 | ||
3729 | }, | |
8f6acb7f | 3730 | .ivsize = GCM_RFC4106_IV_SIZE, |
2debd332 HJ |
3731 | .maxauthsize = GHASH_DIGEST_SIZE, |
3732 | .setkey = chcr_gcm_setkey, | |
3733 | .setauthsize = chcr_4106_4309_setauthsize, | |
3734 | } | |
3735 | }, | |
3736 | { | |
3737 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM, | |
3738 | .is_registered = 0, | |
3739 | .alg.aead = { | |
3740 | .base = { | |
3741 | .cra_name = "ccm(aes)", | |
3742 | .cra_driver_name = "ccm-aes-chcr", | |
3743 | .cra_blocksize = 1, | |
e29abda5 | 3744 | .cra_priority = CHCR_AEAD_PRIORITY, |
2debd332 HJ |
3745 | .cra_ctxsize = sizeof(struct chcr_context) + |
3746 | sizeof(struct chcr_aead_ctx), | |
3747 | ||
3748 | }, | |
3749 | .ivsize = AES_BLOCK_SIZE, | |
3750 | .maxauthsize = GHASH_DIGEST_SIZE, | |
3751 | .setkey = chcr_aead_ccm_setkey, | |
3752 | .setauthsize = chcr_ccm_setauthsize, | |
3753 | } | |
3754 | }, | |
3755 | { | |
3756 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309, | |
3757 | .is_registered = 0, | |
3758 | .alg.aead = { | |
3759 | .base = { | |
3760 | .cra_name = "rfc4309(ccm(aes))", | |
3761 | .cra_driver_name = "rfc4309-ccm-aes-chcr", | |
3762 | .cra_blocksize = 1, | |
e29abda5 | 3763 | .cra_priority = CHCR_AEAD_PRIORITY + 1, |
2debd332 HJ |
3764 | .cra_ctxsize = sizeof(struct chcr_context) + |
3765 | sizeof(struct chcr_aead_ctx), | |
3766 | ||
3767 | }, | |
3768 | .ivsize = 8, | |
3769 | .maxauthsize = GHASH_DIGEST_SIZE, | |
3770 | .setkey = chcr_aead_rfc4309_setkey, | |
3771 | .setauthsize = chcr_4106_4309_setauthsize, | |
3772 | } | |
3773 | }, | |
3774 | { | |
3d64bd67 | 3775 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
2debd332 HJ |
3776 | .is_registered = 0, |
3777 | .alg.aead = { | |
3778 | .base = { | |
3779 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | |
3780 | .cra_driver_name = | |
3781 | "authenc-hmac-sha1-cbc-aes-chcr", | |
3782 | .cra_blocksize = AES_BLOCK_SIZE, | |
e29abda5 | 3783 | .cra_priority = CHCR_AEAD_PRIORITY, |
2debd332 HJ |
3784 | .cra_ctxsize = sizeof(struct chcr_context) + |
3785 | sizeof(struct chcr_aead_ctx) + | |
3786 | sizeof(struct chcr_authenc_ctx), | |
3787 | ||
3788 | }, | |
3789 | .ivsize = AES_BLOCK_SIZE, | |
3790 | .maxauthsize = SHA1_DIGEST_SIZE, | |
3791 | .setkey = chcr_authenc_setkey, | |
3792 | .setauthsize = chcr_authenc_setauthsize, | |
3793 | } | |
3794 | }, | |
3795 | { | |
3d64bd67 | 3796 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
2debd332 HJ |
3797 | .is_registered = 0, |
3798 | .alg.aead = { | |
3799 | .base = { | |
3800 | ||
3801 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | |
3802 | .cra_driver_name = | |
3803 | "authenc-hmac-sha256-cbc-aes-chcr", | |
3804 | .cra_blocksize = AES_BLOCK_SIZE, | |
e29abda5 | 3805 | .cra_priority = CHCR_AEAD_PRIORITY, |
2debd332 HJ |
3806 | .cra_ctxsize = sizeof(struct chcr_context) + |
3807 | sizeof(struct chcr_aead_ctx) + | |
3808 | sizeof(struct chcr_authenc_ctx), | |
3809 | ||
3810 | }, | |
3811 | .ivsize = AES_BLOCK_SIZE, | |
3812 | .maxauthsize = SHA256_DIGEST_SIZE, | |
3813 | .setkey = chcr_authenc_setkey, | |
3814 | .setauthsize = chcr_authenc_setauthsize, | |
3815 | } | |
3816 | }, | |
3817 | { | |
3d64bd67 | 3818 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
2debd332 HJ |
3819 | .is_registered = 0, |
3820 | .alg.aead = { | |
3821 | .base = { | |
3822 | .cra_name = "authenc(hmac(sha224),cbc(aes))", | |
3823 | .cra_driver_name = | |
3824 | "authenc-hmac-sha224-cbc-aes-chcr", | |
3825 | .cra_blocksize = AES_BLOCK_SIZE, | |
e29abda5 | 3826 | .cra_priority = CHCR_AEAD_PRIORITY, |
2debd332 HJ |
3827 | .cra_ctxsize = sizeof(struct chcr_context) + |
3828 | sizeof(struct chcr_aead_ctx) + | |
3829 | sizeof(struct chcr_authenc_ctx), | |
3830 | }, | |
3831 | .ivsize = AES_BLOCK_SIZE, | |
3832 | .maxauthsize = SHA224_DIGEST_SIZE, | |
3833 | .setkey = chcr_authenc_setkey, | |
3834 | .setauthsize = chcr_authenc_setauthsize, | |
3835 | } | |
3836 | }, | |
3837 | { | |
3d64bd67 | 3838 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
2debd332 HJ |
3839 | .is_registered = 0, |
3840 | .alg.aead = { | |
3841 | .base = { | |
3842 | .cra_name = "authenc(hmac(sha384),cbc(aes))", | |
3843 | .cra_driver_name = | |
3844 | "authenc-hmac-sha384-cbc-aes-chcr", | |
3845 | .cra_blocksize = AES_BLOCK_SIZE, | |
e29abda5 | 3846 | .cra_priority = CHCR_AEAD_PRIORITY, |
2debd332 HJ |
3847 | .cra_ctxsize = sizeof(struct chcr_context) + |
3848 | sizeof(struct chcr_aead_ctx) + | |
3849 | sizeof(struct chcr_authenc_ctx), | |
3850 | ||
3851 | }, | |
3852 | .ivsize = AES_BLOCK_SIZE, | |
3853 | .maxauthsize = SHA384_DIGEST_SIZE, | |
3854 | .setkey = chcr_authenc_setkey, | |
3855 | .setauthsize = chcr_authenc_setauthsize, | |
3856 | } | |
3857 | }, | |
3858 | { | |
3d64bd67 | 3859 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
2debd332 HJ |
3860 | .is_registered = 0, |
3861 | .alg.aead = { | |
3862 | .base = { | |
3863 | .cra_name = "authenc(hmac(sha512),cbc(aes))", | |
3864 | .cra_driver_name = | |
3865 | "authenc-hmac-sha512-cbc-aes-chcr", | |
3866 | .cra_blocksize = AES_BLOCK_SIZE, | |
e29abda5 | 3867 | .cra_priority = CHCR_AEAD_PRIORITY, |
2debd332 HJ |
3868 | .cra_ctxsize = sizeof(struct chcr_context) + |
3869 | sizeof(struct chcr_aead_ctx) + | |
3870 | sizeof(struct chcr_authenc_ctx), | |
3871 | ||
3872 | }, | |
3873 | .ivsize = AES_BLOCK_SIZE, | |
3874 | .maxauthsize = SHA512_DIGEST_SIZE, | |
3875 | .setkey = chcr_authenc_setkey, | |
3876 | .setauthsize = chcr_authenc_setauthsize, | |
3877 | } | |
3878 | }, | |
3879 | { | |
3d64bd67 | 3880 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL, |
2debd332 HJ |
3881 | .is_registered = 0, |
3882 | .alg.aead = { | |
3883 | .base = { | |
3884 | .cra_name = "authenc(digest_null,cbc(aes))", | |
3885 | .cra_driver_name = | |
3886 | "authenc-digest_null-cbc-aes-chcr", | |
3887 | .cra_blocksize = AES_BLOCK_SIZE, | |
e29abda5 | 3888 | .cra_priority = CHCR_AEAD_PRIORITY, |
2debd332 HJ |
3889 | .cra_ctxsize = sizeof(struct chcr_context) + |
3890 | sizeof(struct chcr_aead_ctx) + | |
3891 | sizeof(struct chcr_authenc_ctx), | |
3892 | ||
3893 | }, | |
3894 | .ivsize = AES_BLOCK_SIZE, | |
3895 | .maxauthsize = 0, | |
3896 | .setkey = chcr_aead_digest_null_setkey, | |
3897 | .setauthsize = chcr_authenc_null_setauthsize, | |
3898 | } | |
3899 | }, | |
3d64bd67 HJ |
3900 | { |
3901 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, | |
3902 | .is_registered = 0, | |
3903 | .alg.aead = { | |
3904 | .base = { | |
3905 | .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", | |
3906 | .cra_driver_name = | |
3907 | "authenc-hmac-sha1-rfc3686-ctr-aes-chcr", | |
3908 | .cra_blocksize = 1, | |
3909 | .cra_priority = CHCR_AEAD_PRIORITY, | |
3910 | .cra_ctxsize = sizeof(struct chcr_context) + | |
3911 | sizeof(struct chcr_aead_ctx) + | |
3912 | sizeof(struct chcr_authenc_ctx), | |
3913 | ||
3914 | }, | |
3915 | .ivsize = CTR_RFC3686_IV_SIZE, | |
3916 | .maxauthsize = SHA1_DIGEST_SIZE, | |
3917 | .setkey = chcr_authenc_setkey, | |
3918 | .setauthsize = chcr_authenc_setauthsize, | |
3919 | } | |
3920 | }, | |
3921 | { | |
3922 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, | |
3923 | .is_registered = 0, | |
3924 | .alg.aead = { | |
3925 | .base = { | |
3926 | ||
3927 | .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", | |
3928 | .cra_driver_name = | |
3929 | "authenc-hmac-sha256-rfc3686-ctr-aes-chcr", | |
3930 | .cra_blocksize = 1, | |
3931 | .cra_priority = CHCR_AEAD_PRIORITY, | |
3932 | .cra_ctxsize = sizeof(struct chcr_context) + | |
3933 | sizeof(struct chcr_aead_ctx) + | |
3934 | sizeof(struct chcr_authenc_ctx), | |
3935 | ||
3936 | }, | |
3937 | .ivsize = CTR_RFC3686_IV_SIZE, | |
3938 | .maxauthsize = SHA256_DIGEST_SIZE, | |
3939 | .setkey = chcr_authenc_setkey, | |
3940 | .setauthsize = chcr_authenc_setauthsize, | |
3941 | } | |
3942 | }, | |
3943 | { | |
3944 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, | |
3945 | .is_registered = 0, | |
3946 | .alg.aead = { | |
3947 | .base = { | |
3948 | .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", | |
3949 | .cra_driver_name = | |
3950 | "authenc-hmac-sha224-rfc3686-ctr-aes-chcr", | |
3951 | .cra_blocksize = 1, | |
3952 | .cra_priority = CHCR_AEAD_PRIORITY, | |
3953 | .cra_ctxsize = sizeof(struct chcr_context) + | |
3954 | sizeof(struct chcr_aead_ctx) + | |
3955 | sizeof(struct chcr_authenc_ctx), | |
3956 | }, | |
3957 | .ivsize = CTR_RFC3686_IV_SIZE, | |
3958 | .maxauthsize = SHA224_DIGEST_SIZE, | |
3959 | .setkey = chcr_authenc_setkey, | |
3960 | .setauthsize = chcr_authenc_setauthsize, | |
3961 | } | |
3962 | }, | |
3963 | { | |
3964 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, | |
3965 | .is_registered = 0, | |
3966 | .alg.aead = { | |
3967 | .base = { | |
3968 | .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", | |
3969 | .cra_driver_name = | |
3970 | "authenc-hmac-sha384-rfc3686-ctr-aes-chcr", | |
3971 | .cra_blocksize = 1, | |
3972 | .cra_priority = CHCR_AEAD_PRIORITY, | |
3973 | .cra_ctxsize = sizeof(struct chcr_context) + | |
3974 | sizeof(struct chcr_aead_ctx) + | |
3975 | sizeof(struct chcr_authenc_ctx), | |
3976 | ||
3977 | }, | |
3978 | .ivsize = CTR_RFC3686_IV_SIZE, | |
3979 | .maxauthsize = SHA384_DIGEST_SIZE, | |
3980 | .setkey = chcr_authenc_setkey, | |
3981 | .setauthsize = chcr_authenc_setauthsize, | |
3982 | } | |
3983 | }, | |
3984 | { | |
3985 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, | |
3986 | .is_registered = 0, | |
3987 | .alg.aead = { | |
3988 | .base = { | |
3989 | .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", | |
3990 | .cra_driver_name = | |
3991 | "authenc-hmac-sha512-rfc3686-ctr-aes-chcr", | |
3992 | .cra_blocksize = 1, | |
3993 | .cra_priority = CHCR_AEAD_PRIORITY, | |
3994 | .cra_ctxsize = sizeof(struct chcr_context) + | |
3995 | sizeof(struct chcr_aead_ctx) + | |
3996 | sizeof(struct chcr_authenc_ctx), | |
3997 | ||
3998 | }, | |
3999 | .ivsize = CTR_RFC3686_IV_SIZE, | |
4000 | .maxauthsize = SHA512_DIGEST_SIZE, | |
4001 | .setkey = chcr_authenc_setkey, | |
4002 | .setauthsize = chcr_authenc_setauthsize, | |
4003 | } | |
4004 | }, | |
4005 | { | |
4006 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL, | |
4007 | .is_registered = 0, | |
4008 | .alg.aead = { | |
4009 | .base = { | |
4010 | .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))", | |
4011 | .cra_driver_name = | |
4012 | "authenc-digest_null-rfc3686-ctr-aes-chcr", | |
4013 | .cra_blocksize = 1, | |
4014 | .cra_priority = CHCR_AEAD_PRIORITY, | |
4015 | .cra_ctxsize = sizeof(struct chcr_context) + | |
4016 | sizeof(struct chcr_aead_ctx) + | |
4017 | sizeof(struct chcr_authenc_ctx), | |
4018 | ||
4019 | }, | |
4020 | .ivsize = CTR_RFC3686_IV_SIZE, | |
4021 | .maxauthsize = 0, | |
4022 | .setkey = chcr_aead_digest_null_setkey, | |
4023 | .setauthsize = chcr_authenc_null_setauthsize, | |
4024 | } | |
4025 | }, | |
4026 | ||
324429d7 HS |
4027 | }; |
4028 | ||
4029 | /* | |
4030 | * chcr_unregister_alg - Deregister crypto algorithms from the |
4031 | * kernel framework. |
4032 | */ | |
4033 | static int chcr_unregister_alg(void) | |
4034 | { | |
4035 | int i; | |
4036 | ||
4037 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | |
4038 | switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { | |
4039 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | |
4040 | if (driver_algs[i].is_registered) | |
4041 | crypto_unregister_alg( | |
4042 | &driver_algs[i].alg.crypto); | |
4043 | break; | |
2debd332 HJ |
4044 | case CRYPTO_ALG_TYPE_AEAD: |
4045 | if (driver_algs[i].is_registered) | |
4046 | crypto_unregister_aead( | |
4047 | &driver_algs[i].alg.aead); | |
4048 | break; | |
324429d7 HS |
4049 | case CRYPTO_ALG_TYPE_AHASH: |
4050 | if (driver_algs[i].is_registered) | |
4051 | crypto_unregister_ahash( | |
4052 | &driver_algs[i].alg.hash); | |
4053 | break; | |
4054 | } | |
4055 | driver_algs[i].is_registered = 0; | |
4056 | } | |
4057 | return 0; | |
4058 | } | |
4059 | ||
4060 | #define SZ_AHASH_CTX sizeof(struct chcr_context) | |
4061 | #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx)) | |
4062 | #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx) | |
4063 | #define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC) | |
4064 | ||
4065 | /* | |
4066 | * chcr_register_alg - Register crypto algorithms with the kernel framework. |
4067 | */ | |
4068 | static int chcr_register_alg(void) | |
4069 | { | |
4070 | struct crypto_alg ai; | |
4071 | struct ahash_alg *a_hash; | |
4072 | int err = 0, i; | |
4073 | char *name = NULL; | |
4074 | ||
4075 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | |
4076 | if (driver_algs[i].is_registered) | |
4077 | continue; | |
4078 | switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { | |
4079 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | |
b8fd1f41 HJ |
4080 | driver_algs[i].alg.crypto.cra_priority = |
4081 | CHCR_CRA_PRIORITY; | |
4082 | driver_algs[i].alg.crypto.cra_module = THIS_MODULE; | |
4083 | driver_algs[i].alg.crypto.cra_flags = | |
4084 | CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | | |
4085 | CRYPTO_ALG_NEED_FALLBACK; | |
4086 | driver_algs[i].alg.crypto.cra_ctxsize = | |
4087 | sizeof(struct chcr_context) + | |
4088 | sizeof(struct ablk_ctx); | |
4089 | driver_algs[i].alg.crypto.cra_alignmask = 0; | |
4090 | driver_algs[i].alg.crypto.cra_type = | |
4091 | &crypto_ablkcipher_type; | |
324429d7 HS |
4092 | err = crypto_register_alg(&driver_algs[i].alg.crypto); |
4093 | name = driver_algs[i].alg.crypto.cra_driver_name; | |
4094 | break; | |
2debd332 | 4095 | case CRYPTO_ALG_TYPE_AEAD: |
2debd332 | 4096 | driver_algs[i].alg.aead.base.cra_flags = |
0e93708d HJ |
4097 | CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | |
4098 | CRYPTO_ALG_NEED_FALLBACK; | |
2debd332 HJ |
4099 | driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; |
4100 | driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; | |
4101 | driver_algs[i].alg.aead.init = chcr_aead_cra_init; | |
4102 | driver_algs[i].alg.aead.exit = chcr_aead_cra_exit; | |
4103 | driver_algs[i].alg.aead.base.cra_module = THIS_MODULE; | |
4104 | err = crypto_register_aead(&driver_algs[i].alg.aead); | |
4105 | name = driver_algs[i].alg.aead.base.cra_driver_name; | |
4106 | break; | |
324429d7 HS |
4107 | case CRYPTO_ALG_TYPE_AHASH: |
4108 | a_hash = &driver_algs[i].alg.hash; | |
4109 | a_hash->update = chcr_ahash_update; | |
4110 | a_hash->final = chcr_ahash_final; | |
4111 | a_hash->finup = chcr_ahash_finup; | |
4112 | a_hash->digest = chcr_ahash_digest; | |
4113 | a_hash->export = chcr_ahash_export; | |
4114 | a_hash->import = chcr_ahash_import; | |
4115 | a_hash->halg.statesize = SZ_AHASH_REQ_CTX; | |
4116 | a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; | |
4117 | a_hash->halg.base.cra_module = THIS_MODULE; | |
4118 | a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS; | |
4119 | a_hash->halg.base.cra_alignmask = 0; | |
4120 | a_hash->halg.base.cra_exit = NULL; | |
4121 | a_hash->halg.base.cra_type = &crypto_ahash_type; | |
4122 | ||
4123 | if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) { | |
4124 | a_hash->halg.base.cra_init = chcr_hmac_cra_init; | |
4125 | a_hash->halg.base.cra_exit = chcr_hmac_cra_exit; | |
4126 | a_hash->init = chcr_hmac_init; | |
4127 | a_hash->setkey = chcr_ahash_setkey; | |
4128 | a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX; | |
4129 | } else { | |
4130 | a_hash->init = chcr_sha_init; | |
4131 | a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX; | |
4132 | a_hash->halg.base.cra_init = chcr_sha_cra_init; | |
4133 | } | |
4134 | err = crypto_register_ahash(&driver_algs[i].alg.hash); | |
4135 | ai = driver_algs[i].alg.hash.halg.base; | |
4136 | name = ai.cra_driver_name; | |
4137 | break; | |
4138 | } | |
4139 | if (err) { | |
4140 | pr_err("chcr : %s : Algorithm registration failed\n", | |
4141 | name); | |
4142 | goto register_err; | |
4143 | } else { | |
4144 | driver_algs[i].is_registered = 1; | |
4145 | } | |
4146 | } | |
4147 | return 0; | |
4148 | ||
4149 | register_err: | |
4150 | chcr_unregister_alg(); | |
4151 | return err; | |
4152 | } | |
4153 | ||
4154 | /* | |
4155 | * start_crypto - Register the crypto algorithms. | |
4156 | * This should be called once, when the first device comes up. After this |
4157 | * the kernel will start calling the driver APIs for crypto operations. |
4158 | */ | |
4159 | int start_crypto(void) | |
4160 | { | |
4161 | return chcr_register_alg(); | |
4162 | } | |
4163 | ||
4164 | /* | |
4165 | * stop_crypto - Deregister all the crypto algorithms from the kernel. |
4166 | * This should be called once, when the last device goes down. After this |
4167 | * the kernel will not call the driver APIs for crypto operations. |
4168 | */ | |
4169 | int stop_crypto(void) | |
4170 | { | |
4171 | chcr_unregister_alg(); | |
4172 | return 0; | |
4173 | } |