Commit | Line | Data |
---|---|---|
324429d7 HS |
1 | /* |
2 | * This file is part of the Chelsio T6 Crypto driver for Linux. | |
3 | * | |
4 | * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. | |
5 | * | |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | * | |
34 | * Written and Maintained by: | |
35 | * Manoj Malviya (manojmalviya@chelsio.com) | |
36 | * Atul Gupta (atul.gupta@chelsio.com) | |
37 | * Jitendra Lulla (jlulla@chelsio.com) | |
38 | * Yeshaswi M R Gowda (yeshaswi@chelsio.com) | |
39 | * Harsh Jain (harsh@chelsio.com) | |
40 | */ | |
41 | ||
42 | #define pr_fmt(fmt) "chcr:" fmt | |
43 | ||
44 | #include <linux/kernel.h> | |
45 | #include <linux/module.h> | |
46 | #include <linux/crypto.h> | |
47 | #include <linux/cryptohash.h> | |
48 | #include <linux/skbuff.h> | |
49 | #include <linux/rtnetlink.h> | |
50 | #include <linux/highmem.h> | |
51 | #include <linux/scatterlist.h> | |
52 | ||
53 | #include <crypto/aes.h> | |
54 | #include <crypto/algapi.h> | |
55 | #include <crypto/hash.h> | |
56 | #include <crypto/sha.h> | |
2debd332 HJ |
57 | #include <crypto/authenc.h> |
58 | #include <crypto/internal/aead.h> | |
59 | #include <crypto/null.h> | |
60 | #include <crypto/internal/skcipher.h> | |
61 | #include <crypto/aead.h> | |
62 | #include <crypto/scatterwalk.h> | |
324429d7 HS |
63 | #include <crypto/internal/hash.h> |
64 | ||
65 | #include "t4fw_api.h" | |
66 | #include "t4_msg.h" | |
67 | #include "chcr_core.h" | |
68 | #include "chcr_algo.h" | |
69 | #include "chcr_crypto.h" | |
70 | ||
2debd332 HJ |
71 | static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) |
72 | { | |
73 | return ctx->crypto_ctx->aeadctx; | |
74 | } | |
75 | ||
324429d7 HS |
76 | static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) |
77 | { | |
78 | return ctx->crypto_ctx->ablkctx; | |
79 | } | |
80 | ||
81 | static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx) | |
82 | { | |
83 | return ctx->crypto_ctx->hmacctx; | |
84 | } | |
85 | ||
2debd332 HJ |
86 | static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx) |
87 | { | |
88 | return gctx->ctx->gcm; | |
89 | } | |
90 | ||
91 | static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx) | |
92 | { | |
93 | return gctx->ctx->authenc; | |
94 | } | |
95 | ||
324429d7 HS |
96 | static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) |
97 | { | |
98 | return ctx->dev->u_ctx; | |
99 | } | |
100 | ||
101 | static inline int is_ofld_imm(const struct sk_buff *skb) | |
102 | { | |
103 | return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN); | |
104 | } | |
105 | ||
106 | /* | |
107 | * sgl_len - calculates the size of an SGL of the given capacity | |
108 | * @n: the number of SGL entries | |
109 | * Calculates the number of flits needed for a scatter/gather list that | |
110 | * can hold the given number of entries. | |
111 | */ | |
112 | static inline unsigned int sgl_len(unsigned int n) | |
113 | { | |
114 | n--; | |
115 | return (3 * n) / 2 + (n & 1) + 2; | |
116 | } | |
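/*
 * Worked example added for clarity (not part of the original source),
 * assuming the usual 8-byte flit and packed address/length SGL layout:
 *   sgl_len(1) = 2, sgl_len(2) = 4, sgl_len(3) = 5,
 *   sgl_len(4) = 7, sgl_len(5) = 8
 * i.e. roughly 1.5 flits per additional entry after the first.
 */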
117 | ||
2debd332 HJ |
118 | static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) |
119 | { | |
120 | u8 temp[SHA512_DIGEST_SIZE]; | |
121 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
122 | int authsize = crypto_aead_authsize(tfm); | |
123 | struct cpl_fw6_pld *fw6_pld; | |
124 | int cmp = 0; | |
125 | ||
126 | fw6_pld = (struct cpl_fw6_pld *)input; | |
127 | if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) || | |
128 | (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) { | |
d600fc8a | 129 | cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize); |
2debd332 HJ |
130 | } else { |
131 | ||
132 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp, | |
133 | authsize, req->assoclen + | |
134 | req->cryptlen - authsize); | |
d600fc8a | 135 | cmp = crypto_memneq(temp, (fw6_pld + 1), authsize); |
2debd332 HJ |
136 | } |
137 | if (cmp) | |
138 | *err = -EBADMSG; | |
139 | else | |
140 | *err = 0; | |
141 | } | |
142 | ||
324429d7 HS |
143 | /* |
144 | * chcr_handle_resp - Unmap the DMA buffers associated with the request | |
145 | * @req: crypto request | |
146 | */ | |
147 | int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |
2debd332 | 148 | int err) |
324429d7 HS |
149 | { |
150 | struct crypto_tfm *tfm = req->tfm; | |
151 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
152 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | |
153 | struct chcr_req_ctx ctx_req; | |
154 | struct cpl_fw6_pld *fw6_pld; | |
155 | unsigned int digestsize, updated_digestsize; | |
156 | ||
157 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { | |
2debd332 HJ |
158 | case CRYPTO_ALG_TYPE_AEAD: |
159 | ctx_req.req.aead_req = (struct aead_request *)req; | |
160 | ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req); | |
94e1dab1 | 161 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst, |
2debd332 HJ |
162 | ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); |
163 | if (ctx_req.ctx.reqctx->skb) { | |
164 | kfree_skb(ctx_req.ctx.reqctx->skb); | |
165 | ctx_req.ctx.reqctx->skb = NULL; | |
166 | } | |
167 | if (ctx_req.ctx.reqctx->verify == VERIFY_SW) { | |
168 | chcr_verify_tag(ctx_req.req.aead_req, input, | |
169 | &err); | |
170 | ctx_req.ctx.reqctx->verify = VERIFY_HW; | |
171 | } | |
172 | break; | |
173 | ||
44e9f799 | 174 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
324429d7 HS |
175 | ctx_req.req.ablk_req = (struct ablkcipher_request *)req; |
176 | ctx_req.ctx.ablk_ctx = | |
177 | ablkcipher_request_ctx(ctx_req.req.ablk_req); | |
2debd332 | 178 | if (!err) { |
324429d7 HS |
179 | fw6_pld = (struct cpl_fw6_pld *)input; |
180 | memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2], | |
181 | AES_BLOCK_SIZE); | |
182 | } | |
183 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst, | |
5c86a8ff | 184 | ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE); |
324429d7 HS |
185 | if (ctx_req.ctx.ablk_ctx->skb) { |
186 | kfree_skb(ctx_req.ctx.ablk_ctx->skb); | |
187 | ctx_req.ctx.ablk_ctx->skb = NULL; | |
188 | } | |
189 | break; | |
190 | ||
191 | case CRYPTO_ALG_TYPE_AHASH: | |
192 | ctx_req.req.ahash_req = (struct ahash_request *)req; | |
193 | ctx_req.ctx.ahash_ctx = | |
194 | ahash_request_ctx(ctx_req.req.ahash_req); | |
195 | digestsize = | |
196 | crypto_ahash_digestsize(crypto_ahash_reqtfm( | |
197 | ctx_req.req.ahash_req)); | |
198 | updated_digestsize = digestsize; | |
199 | if (digestsize == SHA224_DIGEST_SIZE) | |
200 | updated_digestsize = SHA256_DIGEST_SIZE; | |
201 | else if (digestsize == SHA384_DIGEST_SIZE) | |
202 | updated_digestsize = SHA512_DIGEST_SIZE; | |
5c86a8ff HJ |
203 | if (ctx_req.ctx.ahash_ctx->skb) { |
204 | kfree_skb(ctx_req.ctx.ahash_ctx->skb); | |
324429d7 | 205 | ctx_req.ctx.ahash_ctx->skb = NULL; |
5c86a8ff | 206 | } |
324429d7 HS |
207 | if (ctx_req.ctx.ahash_ctx->result == 1) { |
208 | ctx_req.ctx.ahash_ctx->result = 0; | |
209 | memcpy(ctx_req.req.ahash_req->result, input + | |
210 | sizeof(struct cpl_fw6_pld), | |
211 | digestsize); | |
212 | } else { | |
213 | memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input + | |
214 | sizeof(struct cpl_fw6_pld), | |
215 | updated_digestsize); | |
216 | } | |
324429d7 HS |
217 | break; |
218 | } | |
2debd332 | 219 | return err; |
324429d7 HS |
220 | } |
221 | ||
222 | /* | |
223 | * calc_tx_flits_ofld - calculate # of flits for an offload packet | |
224 | * @skb: the packet | |
225 | * Returns the number of flits needed for the given offload packet. | |
226 | * These packets are already fully constructed and no additional headers | |
227 | * will be added. | |
228 | */ | |
229 | static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) | |
230 | { | |
231 | unsigned int flits, cnt; | |
232 | ||
233 | if (is_ofld_imm(skb)) | |
234 | return DIV_ROUND_UP(skb->len, 8); | |
235 | ||
236 | flits = skb_transport_offset(skb) / 8; /* headers */ | |
237 | cnt = skb_shinfo(skb)->nr_frags; | |
238 | if (skb_tail_pointer(skb) != skb_transport_header(skb)) | |
239 | cnt++; | |
240 | return flits + sgl_len(cnt); | |
241 | } | |
242 | ||
39f91a34 HJ |
243 | static inline void get_aes_decrypt_key(unsigned char *dec_key, |
244 | const unsigned char *key, | |
245 | unsigned int keylength) | |
246 | { | |
247 | u32 temp; | |
248 | u32 w_ring[MAX_NK]; | |
249 | int i, j, k; | |
250 | u8 nr, nk; | |
251 | ||
252 | switch (keylength) { | |
253 | case AES_KEYLENGTH_128BIT: | |
254 | nk = KEYLENGTH_4BYTES; | |
255 | nr = NUMBER_OF_ROUNDS_10; | |
256 | break; | |
257 | case AES_KEYLENGTH_192BIT: | |
258 | nk = KEYLENGTH_6BYTES; | |
259 | nr = NUMBER_OF_ROUNDS_12; | |
260 | break; | |
261 | case AES_KEYLENGTH_256BIT: | |
262 | nk = KEYLENGTH_8BYTES; | |
263 | nr = NUMBER_OF_ROUNDS_14; | |
264 | break; | |
265 | default: | |
266 | return; | |
267 | } | |
268 | for (i = 0; i < nk; i++) | |
269 | w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]); | |
270 | ||
271 | i = 0; | |
272 | temp = w_ring[nk - 1]; | |
273 | while (i + nk < (nr + 1) * 4) { | |
274 | if (!(i % nk)) { | |
275 | /* RotWord(temp) */ | |
276 | temp = (temp << 8) | (temp >> 24); | |
277 | temp = aes_ks_subword(temp); | |
278 | temp ^= round_constant[i / nk]; | |
279 | } else if (nk == 8 && (i % 4 == 0)) { | |
280 | temp = aes_ks_subword(temp); | |
281 | } | |
282 | w_ring[i % nk] ^= temp; | |
283 | temp = w_ring[i % nk]; | |
284 | i++; | |
285 | } | |
286 | i--; | |
287 | for (k = 0, j = i % nk; k < nk; k++) { | |
288 | *((u32 *)dec_key + k) = htonl(w_ring[j]); | |
289 | j--; | |
290 | if (j < 0) | |
291 | j += nk; | |
292 | } | |
293 | } | |
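/*
 * Note added for clarity (not in the original source): the loop above runs
 * the standard FIPS-197 AES key expansion, keeping only the most recent nk
 * schedule words in w_ring. The final copy therefore writes the last nk
 * expanded words into dec_key, starting with the very last schedule word and
 * walking backwards, byte-swapped with htonl() for the hardware key context.
 * This is the "reversed round key" (rrkey) that generate_copy_rrkey() below
 * places into the key context for decryption.
 */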
294 | ||
e7922729 | 295 | static struct crypto_shash *chcr_alloc_shash(unsigned int ds) |
324429d7 | 296 | { |
ec1bca94 | 297 | struct crypto_shash *base_hash = ERR_PTR(-EINVAL); |
324429d7 HS |
298 | |
299 | switch (ds) { | |
300 | case SHA1_DIGEST_SIZE: | |
e7922729 | 301 | base_hash = crypto_alloc_shash("sha1", 0, 0); |
324429d7 HS |
302 | break; |
303 | case SHA224_DIGEST_SIZE: | |
e7922729 | 304 | base_hash = crypto_alloc_shash("sha224", 0, 0); |
324429d7 HS |
305 | break; |
306 | case SHA256_DIGEST_SIZE: | |
e7922729 | 307 | base_hash = crypto_alloc_shash("sha256", 0, 0); |
324429d7 HS |
308 | break; |
309 | case SHA384_DIGEST_SIZE: | |
e7922729 | 310 | base_hash = crypto_alloc_shash("sha384", 0, 0); |
324429d7 HS |
311 | break; |
312 | case SHA512_DIGEST_SIZE: | |
e7922729 | 313 | base_hash = crypto_alloc_shash("sha512", 0, 0); |
324429d7 HS |
314 | break; |
315 | } | |
324429d7 | 316 | |
e7922729 | 317 | return base_hash; |
324429d7 HS |
318 | } |
319 | ||
320 | static int chcr_compute_partial_hash(struct shash_desc *desc, | |
321 | char *iopad, char *result_hash, | |
322 | int digest_size) | |
323 | { | |
324 | struct sha1_state sha1_st; | |
325 | struct sha256_state sha256_st; | |
326 | struct sha512_state sha512_st; | |
327 | int error; | |
328 | ||
329 | if (digest_size == SHA1_DIGEST_SIZE) { | |
330 | error = crypto_shash_init(desc) ?: | |
331 | crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?: | |
332 | crypto_shash_export(desc, (void *)&sha1_st); | |
333 | memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE); | |
334 | } else if (digest_size == SHA224_DIGEST_SIZE) { | |
335 | error = crypto_shash_init(desc) ?: | |
336 | crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: | |
337 | crypto_shash_export(desc, (void *)&sha256_st); | |
338 | memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); | |
339 | ||
340 | } else if (digest_size == SHA256_DIGEST_SIZE) { | |
341 | error = crypto_shash_init(desc) ?: | |
342 | crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: | |
343 | crypto_shash_export(desc, (void *)&sha256_st); | |
344 | memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); | |
345 | ||
346 | } else if (digest_size == SHA384_DIGEST_SIZE) { | |
347 | error = crypto_shash_init(desc) ?: | |
348 | crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: | |
349 | crypto_shash_export(desc, (void *)&sha512_st); | |
350 | memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); | |
351 | ||
352 | } else if (digest_size == SHA512_DIGEST_SIZE) { | |
353 | error = crypto_shash_init(desc) ?: | |
354 | crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: | |
355 | crypto_shash_export(desc, (void *)&sha512_st); | |
356 | memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); | |
357 | } else { | |
358 | error = -EINVAL; | |
359 | pr_err("Unknown digest size %d\n", digest_size); | |
360 | } | |
361 | return error; | |
362 | } | |
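/*
 * Clarifying note (added, not from the original source): this helper hashes
 * exactly one block (the HMAC ipad or opad) with the software shash and then
 * exports the raw internal state words rather than a finalised digest. That
 * partial state is what later gets loaded as the hardware hash state, so the
 * engine can continue the HMAC computation after that first block.
 */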
363 | ||
364 | static void chcr_change_order(char *buf, int ds) | |
365 | { | |
366 | int i; | |
367 | ||
368 | if (ds == SHA512_DIGEST_SIZE) { | |
369 | for (i = 0; i < (ds / sizeof(u64)); i++) | |
370 | *((__be64 *)buf + i) = | |
371 | cpu_to_be64(*((u64 *)buf + i)); | |
372 | } else { | |
373 | for (i = 0; i < (ds / sizeof(u32)); i++) | |
374 | *((__be32 *)buf + i) = | |
375 | cpu_to_be32(*((u32 *)buf + i)); | |
376 | } | |
377 | } | |
378 | ||
379 | static inline int is_hmac(struct crypto_tfm *tfm) | |
380 | { | |
381 | struct crypto_alg *alg = tfm->__crt_alg; | |
382 | struct chcr_alg_template *chcr_crypto_alg = | |
383 | container_of(__crypto_ahash_alg(alg), struct chcr_alg_template, | |
384 | alg.hash); | |
5c86a8ff | 385 | if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC) |
324429d7 HS |
386 | return 1; |
387 | return 0; | |
388 | } | |
389 | ||
324429d7 HS |
390 | static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, |
391 | struct scatterlist *sg, | |
392 | struct phys_sge_parm *sg_param) | |
393 | { | |
394 | struct phys_sge_pairs *to; | |
adf1ca61 HJ |
395 | int out_buf_size = sg_param->obsize; |
396 | unsigned int nents = sg_param->nents, i, j = 0; | |
324429d7 HS |
397 | |
398 | phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) | |
399 | | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); | |
400 | phys_cpl->pcirlxorder_to_noofsgentr = | |
401 | htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) | | |
402 | CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) | | |
403 | CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | | |
404 | CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | | |
405 | CPL_RX_PHYS_DSGL_DCAID_V(0) | | |
406 | CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents)); | |
407 | phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; | |
408 | phys_cpl->rss_hdr_int.qid = htons(sg_param->qid); | |
409 | phys_cpl->rss_hdr_int.hash_val = 0; | |
410 | to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl + | |
411 | sizeof(struct cpl_rx_phys_dsgl)); | |
412 | ||
413 | for (i = 0; nents; to++) { | |
adf1ca61 HJ |
414 | for (j = 0; j < 8 && nents; j++, nents--) { |
415 | out_buf_size -= sg_dma_len(sg); | |
416 | to->len[j] = htons(sg_dma_len(sg)); | |
324429d7 | 417 | to->addr[j] = cpu_to_be64(sg_dma_address(sg)); |
324429d7 HS |
418 | sg = sg_next(sg); |
419 | } | |
420 | } | |
adf1ca61 HJ |
421 | if (out_buf_size) { |
422 | j--; | |
423 | to--; | |
424 | to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size)); | |
425 | } | |
324429d7 HS |
426 | } |
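/*
 * Added note (not part of the original source): each struct phys_sge_pairs
 * holds up to 8 length/address slots, so the nested loop fills the DSGL
 * eight entries at a time. The trailing fixup adds any leftover byte count
 * to the final length entry when the mapped scatterlist covers less than
 * sg_param->obsize, so the hardware still sees a destination spanning the
 * full expected response.
 */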
427 | ||
adf1ca61 HJ |
428 | static inline int map_writesg_phys_cpl(struct device *dev, |
429 | struct cpl_rx_phys_dsgl *phys_cpl, | |
430 | struct scatterlist *sg, | |
431 | struct phys_sge_parm *sg_param) | |
324429d7 HS |
432 | { |
433 | if (!sg || !sg_param->nents) | |
434 | return 0; | |
435 | ||
436 | sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE); | |
437 | if (sg_param->nents == 0) { | |
438 | pr_err("CHCR : DMA mapping failed\n"); | |
439 | return -EINVAL; | |
440 | } | |
441 | write_phys_cpl(phys_cpl, sg, sg_param); | |
442 | return 0; | |
443 | } | |
444 | ||
2debd332 HJ |
445 | static inline int get_aead_subtype(struct crypto_aead *aead) |
446 | { | |
447 | struct aead_alg *alg = crypto_aead_alg(aead); | |
448 | struct chcr_alg_template *chcr_crypto_alg = | |
449 | container_of(alg, struct chcr_alg_template, alg.aead); | |
450 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | |
451 | } | |
452 | ||
324429d7 HS |
453 | static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) |
454 | { | |
455 | struct crypto_alg *alg = tfm->__crt_alg; | |
456 | struct chcr_alg_template *chcr_crypto_alg = | |
457 | container_of(alg, struct chcr_alg_template, alg.crypto); | |
458 | ||
459 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | |
460 | } | |
461 | ||
358961d1 HJ |
462 | static inline void write_buffer_to_skb(struct sk_buff *skb, |
463 | unsigned int *frags, | |
464 | char *bfr, | |
465 | u8 bfr_len) | |
466 | { | |
467 | skb->len += bfr_len; | |
468 | skb->data_len += bfr_len; | |
469 | skb->truesize += bfr_len; | |
470 | get_page(virt_to_page(bfr)); | |
471 | skb_fill_page_desc(skb, *frags, virt_to_page(bfr), | |
472 | offset_in_page(bfr), bfr_len); | |
473 | (*frags)++; | |
474 | } | |
475 | ||
476 | ||
324429d7 | 477 | static inline void |
358961d1 | 478 | write_sg_to_skb(struct sk_buff *skb, unsigned int *frags, |
324429d7 HS |
479 | struct scatterlist *sg, unsigned int count) |
480 | { | |
481 | struct page *spage; | |
482 | unsigned int page_len; | |
483 | ||
484 | skb->len += count; | |
485 | skb->data_len += count; | |
486 | skb->truesize += count; | |
18f0aa06 | 487 | |
324429d7 | 488 | while (count > 0) { |
18f0aa06 | 489 | if (!sg || (!(sg->length))) |
324429d7 HS |
490 | break; |
491 | spage = sg_page(sg); | |
492 | get_page(spage); | |
493 | page_len = min(sg->length, count); | |
494 | skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len); | |
495 | (*frags)++; | |
496 | count -= page_len; | |
497 | sg = sg_next(sg); | |
498 | } | |
499 | } | |
500 | ||
501 | static int generate_copy_rrkey(struct ablk_ctx *ablkctx, | |
502 | struct _key_ctx *key_ctx) | |
503 | { | |
504 | if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { | |
cc1b156d | 505 | memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len); |
324429d7 HS |
506 | } else { |
507 | memcpy(key_ctx->key, | |
508 | ablkctx->key + (ablkctx->enckey_len >> 1), | |
509 | ablkctx->enckey_len >> 1); | |
cc1b156d HJ |
510 | memcpy(key_ctx->key + (ablkctx->enckey_len >> 1), |
511 | ablkctx->rrkey, ablkctx->enckey_len >> 1); | |
324429d7 HS |
512 | } |
513 | return 0; | |
514 | } | |
515 | ||
516 | static inline void create_wreq(struct chcr_context *ctx, | |
358961d1 | 517 | struct chcr_wr *chcr_req, |
324429d7 HS |
518 | void *req, struct sk_buff *skb, |
519 | int kctx_len, int hash_sz, | |
2debd332 | 520 | int is_iv, |
2512a624 HJ |
521 | unsigned int sc_len, |
522 | unsigned int lcb) | |
324429d7 HS |
523 | { |
524 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | |
324429d7 | 525 | int iv_loc = IV_DSGL; |
72a56ca9 | 526 | int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx]; |
324429d7 HS |
527 | unsigned int immdatalen = 0, nr_frags = 0; |
528 | ||
529 | if (is_ofld_imm(skb)) { | |
530 | immdatalen = skb->data_len; | |
531 | iv_loc = IV_IMMEDIATE; | |
532 | } else { | |
533 | nr_frags = skb_shinfo(skb)->nr_frags; | |
534 | } | |
535 | ||
358961d1 HJ |
536 | chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen, |
537 | ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4)); | |
538 | chcr_req->wreq.pld_size_hash_size = | |
324429d7 HS |
539 | htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) | |
540 | FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); | |
358961d1 HJ |
541 | chcr_req->wreq.len16_pkd = |
542 | htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP( | |
324429d7 | 543 | (calc_tx_flits_ofld(skb) * 8), 16))); |
358961d1 HJ |
544 | chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); |
545 | chcr_req->wreq.rx_chid_to_rx_q_id = | |
8a13449f | 546 | FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, |
2512a624 HJ |
547 | is_iv ? iv_loc : IV_NOP, !!lcb, |
548 | ctx->tx_qidx); | |
324429d7 | 549 | |
8a13449f HJ |
550 | chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id, |
551 | qid); | |
358961d1 HJ |
552 | chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8), |
553 | 16) - ((sizeof(chcr_req->wreq)) >> 4))); | |
324429d7 | 554 | |
358961d1 HJ |
555 | chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen); |
556 | chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + | |
557 | sizeof(chcr_req->key_ctx) + | |
2debd332 | 558 | kctx_len + sc_len + immdatalen); |
324429d7 HS |
559 | } |
560 | ||
561 | /** | |
562 | * create_cipher_wr - form the WR for cipher operations | |
563 | * @req: cipher req. | |
 564 | * (the crypto driver context is derived from @req's tfm; it is not passed in) | |
565 | * @qid: ingress qid where response of this WR should be received. | |
566 | * @op_type: encryption or decryption | |
567 | */ | |
568 | static struct sk_buff | |
358961d1 HJ |
569 | *create_cipher_wr(struct ablkcipher_request *req, |
570 | unsigned short qid, | |
324429d7 HS |
571 | unsigned short op_type) |
572 | { | |
324429d7 | 573 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
358961d1 | 574 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); |
324429d7 HS |
575 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
576 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | |
577 | struct sk_buff *skb = NULL; | |
358961d1 | 578 | struct chcr_wr *chcr_req; |
324429d7 | 579 | struct cpl_rx_phys_dsgl *phys_cpl; |
5c86a8ff | 580 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
324429d7 | 581 | struct phys_sge_parm sg_param; |
adf1ca61 | 582 | unsigned int frags = 0, transhdr_len, phys_dsgl; |
324429d7 | 583 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len; |
358961d1 HJ |
584 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
585 | GFP_ATOMIC; | |
324429d7 HS |
586 | |
587 | if (!req->info) | |
588 | return ERR_PTR(-EINVAL); | |
5c86a8ff HJ |
589 | reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); |
590 | if (reqctx->dst_nents <= 0) { | |
adf1ca61 HJ |
591 | pr_err("AES:Invalid Destination sg lists\n"); |
592 | return ERR_PTR(-EINVAL); | |
593 | } | |
324429d7 | 594 | if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || |
358961d1 HJ |
595 | (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) { |
596 | pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n", | |
597 | ablkctx->enckey_len, req->nbytes, ivsize); | |
324429d7 | 598 | return ERR_PTR(-EINVAL); |
358961d1 | 599 | } |
324429d7 | 600 | |
5c86a8ff | 601 | phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents); |
324429d7 | 602 | |
358961d1 | 603 | kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); |
324429d7 | 604 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); |
358961d1 | 605 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
324429d7 HS |
606 | if (!skb) |
607 | return ERR_PTR(-ENOMEM); | |
608 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | |
358961d1 HJ |
609 | chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len); |
610 | memset(chcr_req, 0, transhdr_len); | |
611 | chcr_req->sec_cpl.op_ivinsrtofst = | |
8a13449f | 612 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1); |
358961d1 HJ |
613 | |
614 | chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes); | |
615 | chcr_req->sec_cpl.aadstart_cipherstop_hi = | |
616 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0); | |
617 | ||
618 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | |
619 | FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); | |
620 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0, | |
324429d7 | 621 | ablkctx->ciph_mode, |
358961d1 HJ |
622 | 0, 0, ivsize >> 1); |
623 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, | |
324429d7 HS |
624 | 0, 1, phys_dsgl); |
625 | ||
358961d1 | 626 | chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; |
324429d7 | 627 | if (op_type == CHCR_DECRYPT_OP) { |
358961d1 | 628 | generate_copy_rrkey(ablkctx, &chcr_req->key_ctx); |
324429d7 HS |
629 | } else { |
630 | if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { | |
358961d1 HJ |
631 | memcpy(chcr_req->key_ctx.key, ablkctx->key, |
632 | ablkctx->enckey_len); | |
324429d7 | 633 | } else { |
358961d1 | 634 | memcpy(chcr_req->key_ctx.key, ablkctx->key + |
324429d7 HS |
635 | (ablkctx->enckey_len >> 1), |
636 | ablkctx->enckey_len >> 1); | |
358961d1 | 637 | memcpy(chcr_req->key_ctx.key + |
324429d7 HS |
638 | (ablkctx->enckey_len >> 1), |
639 | ablkctx->key, | |
640 | ablkctx->enckey_len >> 1); | |
641 | } | |
642 | } | |
358961d1 | 643 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
5c86a8ff | 644 | sg_param.nents = reqctx->dst_nents; |
358961d1 | 645 | sg_param.obsize = req->nbytes; |
324429d7 HS |
646 | sg_param.qid = qid; |
647 | sg_param.align = 1; | |
648 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst, | |
649 | &sg_param)) | |
650 | goto map_fail1; | |
651 | ||
652 | skb_set_transport_header(skb, transhdr_len); | |
5c86a8ff HJ |
653 | memcpy(reqctx->iv, req->info, ivsize); |
654 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); | |
358961d1 | 655 | write_sg_to_skb(skb, &frags, req->src, req->nbytes); |
2debd332 | 656 | create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1, |
2512a624 HJ |
657 | sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl, |
658 | ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); | |
5c86a8ff | 659 | reqctx->skb = skb; |
324429d7 HS |
660 | skb_get(skb); |
661 | return skb; | |
662 | map_fail1: | |
663 | kfree_skb(skb); | |
664 | return ERR_PTR(-ENOMEM); | |
665 | } | |
666 | ||
667 | static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |
668 | unsigned int keylen) | |
669 | { | |
670 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | |
671 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | |
324429d7 HS |
672 | unsigned int ck_size, context_size; |
673 | u16 alignment = 0; | |
674 | ||
324429d7 HS |
675 | if (keylen == AES_KEYSIZE_128) { |
676 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
677 | } else if (keylen == AES_KEYSIZE_192) { | |
678 | alignment = 8; | |
679 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
680 | } else if (keylen == AES_KEYSIZE_256) { | |
681 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
682 | } else { | |
683 | goto badkey_err; | |
684 | } | |
cc1b156d HJ |
685 | memcpy(ablkctx->key, key, keylen); |
686 | ablkctx->enckey_len = keylen; | |
687 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3); | |
324429d7 HS |
688 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + |
689 | keylen + alignment) >> 4; | |
690 | ||
691 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, | |
692 | 0, 0, context_size); | |
693 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; | |
694 | return 0; | |
695 | badkey_err: | |
696 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
697 | ablkctx->enckey_len = 0; | |
698 | return -EINVAL; | |
699 | } | |
700 | ||
73b86bb7 | 701 | static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) |
324429d7 | 702 | { |
324429d7 | 703 | struct adapter *adap = netdev2adap(dev); |
ab677ff4 HS |
704 | struct sge_uld_txq_info *txq_info = |
705 | adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; | |
706 | struct sge_uld_txq *txq; | |
707 | int ret = 0; | |
324429d7 HS |
708 | |
709 | local_bh_disable(); | |
ab677ff4 HS |
710 | txq = &txq_info->uldtxq[idx]; |
711 | spin_lock(&txq->sendq.lock); | |
712 | if (txq->full) | |
324429d7 | 713 | ret = -1; |
ab677ff4 | 714 | spin_unlock(&txq->sendq.lock); |
324429d7 HS |
715 | local_bh_enable(); |
716 | return ret; | |
717 | } | |
718 | ||
719 | static int chcr_aes_encrypt(struct ablkcipher_request *req) | |
720 | { | |
721 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
722 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | |
324429d7 HS |
723 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
724 | struct sk_buff *skb; | |
725 | ||
726 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
72a56ca9 | 727 | ctx->tx_qidx))) { |
324429d7 HS |
728 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
729 | return -EBUSY; | |
730 | } | |
731 | ||
72a56ca9 | 732 | skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], |
324429d7 HS |
733 | CHCR_ENCRYPT_OP); |
734 | if (IS_ERR(skb)) { | |
735 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); | |
736 | return PTR_ERR(skb); | |
737 | } | |
738 | skb->dev = u_ctx->lldi.ports[0]; | |
72a56ca9 | 739 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); |
324429d7 HS |
740 | chcr_send_wr(skb); |
741 | return -EINPROGRESS; | |
742 | } | |
743 | ||
744 | static int chcr_aes_decrypt(struct ablkcipher_request *req) | |
745 | { | |
746 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
747 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | |
324429d7 HS |
748 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
749 | struct sk_buff *skb; | |
750 | ||
751 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
72a56ca9 | 752 | ctx->tx_qidx))) { |
324429d7 HS |
753 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
754 | return -EBUSY; | |
755 | } | |
756 | ||
72a56ca9 | 757 | skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], |
324429d7 HS |
758 | CHCR_DECRYPT_OP); |
759 | if (IS_ERR(skb)) { | |
760 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); | |
761 | return PTR_ERR(skb); | |
762 | } | |
763 | skb->dev = u_ctx->lldi.ports[0]; | |
72a56ca9 | 764 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); |
324429d7 HS |
765 | chcr_send_wr(skb); |
766 | return -EINPROGRESS; | |
767 | } | |
768 | ||
769 | static int chcr_device_init(struct chcr_context *ctx) | |
770 | { | |
771 | struct uld_ctx *u_ctx; | |
72a56ca9 | 772 | struct adapter *adap; |
324429d7 | 773 | unsigned int id; |
72a56ca9 | 774 | int txq_perchan, txq_idx, ntxq; |
324429d7 HS |
775 | int err = 0, rxq_perchan, rxq_idx; |
776 | ||
777 | id = smp_processor_id(); | |
778 | if (!ctx->dev) { | |
779 | err = assign_chcr_device(&ctx->dev); | |
780 | if (err) { | |
781 | pr_err("chcr device assignment fails\n"); | |
782 | goto out; | |
783 | } | |
784 | u_ctx = ULD_CTX(ctx); | |
72a56ca9 HJ |
785 | adap = padap(ctx->dev); |
786 | ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq, | |
787 | adap->vres.ncrypto_fc); | |
324429d7 | 788 | rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; |
72a56ca9 | 789 | txq_perchan = ntxq / u_ctx->lldi.nchan; |
324429d7 HS |
790 | rxq_idx = ctx->dev->tx_channel_id * rxq_perchan; |
791 | rxq_idx += id % rxq_perchan; | |
72a56ca9 HJ |
792 | txq_idx = ctx->dev->tx_channel_id * txq_perchan; |
793 | txq_idx += id % txq_perchan; | |
324429d7 | 794 | spin_lock(&ctx->dev->lock_chcr_dev); |
72a56ca9 HJ |
795 | ctx->rx_qidx = rxq_idx; |
796 | ctx->tx_qidx = txq_idx; | |
ab677ff4 | 797 | ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id; |
8a13449f | 798 | ctx->dev->rx_channel_id = 0; |
324429d7 HS |
799 | spin_unlock(&ctx->dev->lock_chcr_dev); |
800 | } | |
801 | out: | |
802 | return err; | |
803 | } | |
804 | ||
805 | static int chcr_cra_init(struct crypto_tfm *tfm) | |
806 | { | |
807 | tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); | |
808 | return chcr_device_init(crypto_tfm_ctx(tfm)); | |
809 | } | |
810 | ||
811 | static int get_alg_config(struct algo_param *params, | |
812 | unsigned int auth_size) | |
813 | { | |
814 | switch (auth_size) { | |
815 | case SHA1_DIGEST_SIZE: | |
816 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; | |
817 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1; | |
818 | params->result_size = SHA1_DIGEST_SIZE; | |
819 | break; | |
820 | case SHA224_DIGEST_SIZE: | |
821 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; | |
822 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224; | |
823 | params->result_size = SHA256_DIGEST_SIZE; | |
824 | break; | |
825 | case SHA256_DIGEST_SIZE: | |
826 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; | |
827 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256; | |
828 | params->result_size = SHA256_DIGEST_SIZE; | |
829 | break; | |
830 | case SHA384_DIGEST_SIZE: | |
831 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; | |
832 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384; | |
833 | params->result_size = SHA512_DIGEST_SIZE; | |
834 | break; | |
835 | case SHA512_DIGEST_SIZE: | |
836 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; | |
837 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512; | |
838 | params->result_size = SHA512_DIGEST_SIZE; | |
839 | break; | |
840 | default: | |
841 | pr_err("chcr : ERROR, unsupported digest size\n"); | |
842 | return -EINVAL; | |
843 | } | |
844 | return 0; | |
845 | } | |
846 | ||
e7922729 | 847 | static inline void chcr_free_shash(struct crypto_shash *base_hash) |
324429d7 | 848 | { |
e7922729 | 849 | crypto_free_shash(base_hash); |
324429d7 HS |
850 | } |
851 | ||
852 | /** | |
358961d1 | 853 | * create_hash_wr - Create hash work request |
324429d7 HS |
 854 | * @req - Hash request base | |
855 | */ | |
358961d1 | 856 | static struct sk_buff *create_hash_wr(struct ahash_request *req, |
2debd332 | 857 | struct hash_wr_param *param) |
324429d7 HS |
858 | { |
859 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
860 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
861 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | |
862 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | |
863 | struct sk_buff *skb = NULL; | |
358961d1 | 864 | struct chcr_wr *chcr_req; |
324429d7 HS |
865 | unsigned int frags = 0, transhdr_len, iopad_alignment = 0; |
866 | unsigned int digestsize = crypto_ahash_digestsize(tfm); | |
358961d1 | 867 | unsigned int kctx_len = 0; |
324429d7 | 868 | u8 hash_size_in_response = 0; |
358961d1 HJ |
869 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
870 | GFP_ATOMIC; | |
324429d7 HS |
871 | |
872 | iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); | |
358961d1 | 873 | kctx_len = param->alg_prm.result_size + iopad_alignment; |
324429d7 HS |
874 | if (param->opad_needed) |
875 | kctx_len += param->alg_prm.result_size + iopad_alignment; | |
876 | ||
877 | if (req_ctx->result) | |
878 | hash_size_in_response = digestsize; | |
879 | else | |
880 | hash_size_in_response = param->alg_prm.result_size; | |
881 | transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); | |
358961d1 | 882 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
324429d7 HS |
883 | if (!skb) |
884 | return skb; | |
885 | ||
886 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | |
358961d1 HJ |
887 | chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len); |
888 | memset(chcr_req, 0, transhdr_len); | |
324429d7 | 889 | |
358961d1 | 890 | chcr_req->sec_cpl.op_ivinsrtofst = |
8a13449f | 891 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0); |
358961d1 | 892 | chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); |
324429d7 | 893 | |
358961d1 | 894 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
324429d7 | 895 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0); |
358961d1 | 896 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
324429d7 | 897 | FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0); |
358961d1 | 898 | chcr_req->sec_cpl.seqno_numivs = |
324429d7 | 899 | FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode, |
358961d1 | 900 | param->opad_needed, 0); |
324429d7 | 901 | |
358961d1 | 902 | chcr_req->sec_cpl.ivgen_hdrlen = |
324429d7 HS |
903 | FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0); |
904 | ||
358961d1 HJ |
905 | memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash, |
906 | param->alg_prm.result_size); | |
324429d7 HS |
907 | |
908 | if (param->opad_needed) | |
358961d1 HJ |
909 | memcpy(chcr_req->key_ctx.key + |
910 | ((param->alg_prm.result_size <= 32) ? 32 : | |
911 | CHCR_HASH_MAX_DIGEST_SIZE), | |
324429d7 HS |
912 | hmacctx->opad, param->alg_prm.result_size); |
913 | ||
358961d1 | 914 | chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, |
324429d7 HS |
915 | param->alg_prm.mk_size, 0, |
916 | param->opad_needed, | |
358961d1 HJ |
917 | ((kctx_len + |
918 | sizeof(chcr_req->key_ctx)) >> 4)); | |
919 | chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); | |
324429d7 HS |
920 | |
921 | skb_set_transport_header(skb, transhdr_len); | |
922 | if (param->bfr_len != 0) | |
44fce12a HJ |
923 | write_buffer_to_skb(skb, &frags, req_ctx->reqbfr, |
924 | param->bfr_len); | |
324429d7 | 925 | if (param->sg_len != 0) |
358961d1 | 926 | write_sg_to_skb(skb, &frags, req->src, param->sg_len); |
324429d7 | 927 | |
2debd332 | 928 | create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0, |
2512a624 | 929 | DUMMY_BYTES, 0); |
324429d7 HS |
930 | req_ctx->skb = skb; |
931 | skb_get(skb); | |
932 | return skb; | |
933 | } | |
934 | ||
935 | static int chcr_ahash_update(struct ahash_request *req) | |
936 | { | |
937 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
938 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
939 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); | |
940 | struct uld_ctx *u_ctx = NULL; | |
941 | struct sk_buff *skb; | |
942 | u8 remainder = 0, bs; | |
943 | unsigned int nbytes = req->nbytes; | |
944 | struct hash_wr_param params; | |
945 | ||
946 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
947 | ||
948 | u_ctx = ULD_CTX(ctx); | |
949 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
72a56ca9 | 950 | ctx->tx_qidx))) { |
324429d7 HS |
951 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
952 | return -EBUSY; | |
953 | } | |
954 | ||
44fce12a HJ |
955 | if (nbytes + req_ctx->reqlen >= bs) { |
956 | remainder = (nbytes + req_ctx->reqlen) % bs; | |
957 | nbytes = nbytes + req_ctx->reqlen - remainder; | |
324429d7 | 958 | } else { |
44fce12a HJ |
959 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr |
960 | + req_ctx->reqlen, nbytes, 0); | |
961 | req_ctx->reqlen += nbytes; | |
324429d7 HS |
962 | return 0; |
963 | } | |
964 | ||
965 | params.opad_needed = 0; | |
966 | params.more = 1; | |
967 | params.last = 0; | |
44fce12a HJ |
968 | params.sg_len = nbytes - req_ctx->reqlen; |
969 | params.bfr_len = req_ctx->reqlen; | |
324429d7 HS |
970 | params.scmd1 = 0; |
971 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); | |
972 | req_ctx->result = 0; | |
973 | req_ctx->data_len += params.sg_len + params.bfr_len; | |
358961d1 | 974 | skb = create_hash_wr(req, ¶ms); |
324429d7 HS |
975 | if (!skb) |
976 | return -ENOMEM; | |
977 | ||
44fce12a HJ |
978 | if (remainder) { |
979 | u8 *temp; | |
980 | /* Swap buffers */ | |
981 | temp = req_ctx->reqbfr; | |
982 | req_ctx->reqbfr = req_ctx->skbfr; | |
983 | req_ctx->skbfr = temp; | |
324429d7 | 984 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), |
44fce12a | 985 | req_ctx->reqbfr, remainder, req->nbytes - |
324429d7 | 986 | remainder); |
44fce12a HJ |
987 | } |
988 | req_ctx->reqlen = remainder; | |
324429d7 | 989 | skb->dev = u_ctx->lldi.ports[0]; |
72a56ca9 | 990 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); |
324429d7 HS |
991 | chcr_send_wr(skb); |
992 | ||
993 | return -EINPROGRESS; | |
994 | } | |
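/*
 * Worked example (added for clarity, not in the original source), assuming a
 * 64-byte block size (SHA-1/SHA-256): if 10 bytes are already buffered in
 * reqbfr (reqlen = 10) and the caller updates with nbytes = 100, then
 * remainder = (100 + 10) % 64 = 46, so 64 bytes (10 buffered + 54 from the
 * scatterlist) are sent to the hardware and the trailing 46 bytes are copied
 * into the spare buffer, which becomes the new reqbfr for the next call.
 */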
995 | ||
996 | static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) | |
997 | { | |
998 | memset(bfr_ptr, 0, bs); | |
999 | *bfr_ptr = 0x80; | |
1000 | if (bs == 64) | |
1001 | *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3); | |
1002 | else | |
1003 | *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3); | |
1004 | } | |
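/*
 * Added note (not from the original source): this builds a conventional
 * final padding block: an 0x80 terminator, zero fill, and the message length
 * in bits (scmd1 << 3) as a big-endian 64-bit value in the last 8 bytes of a
 * 64-byte (SHA-1/224/256) or 128-byte (SHA-384/512) block. For example, with
 * bs = 64 and scmd1 = 64 bytes already hashed, bytes 56..63 hold the value 512.
 */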
1005 | ||
1006 | static int chcr_ahash_final(struct ahash_request *req) | |
1007 | { | |
1008 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1009 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
1010 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); | |
1011 | struct hash_wr_param params; | |
1012 | struct sk_buff *skb; | |
1013 | struct uld_ctx *u_ctx = NULL; | |
1014 | u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1015 | ||
1016 | u_ctx = ULD_CTX(ctx); | |
1017 | if (is_hmac(crypto_ahash_tfm(rtfm))) | |
1018 | params.opad_needed = 1; | |
1019 | else | |
1020 | params.opad_needed = 0; | |
1021 | params.sg_len = 0; | |
1022 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); | |
1023 | req_ctx->result = 1; | |
44fce12a | 1024 | params.bfr_len = req_ctx->reqlen; |
324429d7 | 1025 | req_ctx->data_len += params.bfr_len + params.sg_len; |
44fce12a HJ |
1026 | if (req_ctx->reqlen == 0) { |
1027 | create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); | |
324429d7 HS |
1028 | params.last = 0; |
1029 | params.more = 1; | |
1030 | params.scmd1 = 0; | |
1031 | params.bfr_len = bs; | |
1032 | ||
1033 | } else { | |
1034 | params.scmd1 = req_ctx->data_len; | |
1035 | params.last = 1; | |
1036 | params.more = 0; | |
1037 | } | |
358961d1 | 1038 | skb = create_hash_wr(req, ¶ms); |
9a97ffd4 DC |
1039 | if (!skb) |
1040 | return -ENOMEM; | |
358961d1 | 1041 | |
324429d7 | 1042 | skb->dev = u_ctx->lldi.ports[0]; |
72a56ca9 | 1043 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); |
324429d7 HS |
1044 | chcr_send_wr(skb); |
1045 | return -EINPROGRESS; | |
1046 | } | |
1047 | ||
1048 | static int chcr_ahash_finup(struct ahash_request *req) | |
1049 | { | |
1050 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1051 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
1052 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); | |
1053 | struct uld_ctx *u_ctx = NULL; | |
1054 | struct sk_buff *skb; | |
1055 | struct hash_wr_param params; | |
1056 | u8 bs; | |
1057 | ||
1058 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1059 | u_ctx = ULD_CTX(ctx); | |
1060 | ||
1061 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
72a56ca9 | 1062 | ctx->tx_qidx))) { |
324429d7 HS |
1063 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1064 | return -EBUSY; | |
1065 | } | |
1066 | ||
1067 | if (is_hmac(crypto_ahash_tfm(rtfm))) | |
1068 | params.opad_needed = 1; | |
1069 | else | |
1070 | params.opad_needed = 0; | |
1071 | ||
1072 | params.sg_len = req->nbytes; | |
44fce12a | 1073 | params.bfr_len = req_ctx->reqlen; |
324429d7 HS |
1074 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
1075 | req_ctx->data_len += params.bfr_len + params.sg_len; | |
1076 | req_ctx->result = 1; | |
44fce12a HJ |
1077 | if ((req_ctx->reqlen + req->nbytes) == 0) { |
1078 | create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); | |
324429d7 HS |
1079 | params.last = 0; |
1080 | params.more = 1; | |
1081 | params.scmd1 = 0; | |
1082 | params.bfr_len = bs; | |
1083 | } else { | |
1084 | params.scmd1 = req_ctx->data_len; | |
1085 | params.last = 1; | |
1086 | params.more = 0; | |
1087 | } | |
1088 | ||
358961d1 | 1089 | skb = create_hash_wr(req, ¶ms); |
324429d7 HS |
1090 | if (!skb) |
1091 | return -ENOMEM; | |
358961d1 | 1092 | |
324429d7 | 1093 | skb->dev = u_ctx->lldi.ports[0]; |
72a56ca9 | 1094 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); |
324429d7 HS |
1095 | chcr_send_wr(skb); |
1096 | ||
1097 | return -EINPROGRESS; | |
1098 | } | |
1099 | ||
1100 | static int chcr_ahash_digest(struct ahash_request *req) | |
1101 | { | |
1102 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1103 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
1104 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); | |
1105 | struct uld_ctx *u_ctx = NULL; | |
1106 | struct sk_buff *skb; | |
1107 | struct hash_wr_param params; | |
1108 | u8 bs; | |
1109 | ||
1110 | rtfm->init(req); | |
1111 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1112 | ||
1113 | u_ctx = ULD_CTX(ctx); | |
1114 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
72a56ca9 | 1115 | ctx->tx_qidx))) { |
324429d7 HS |
1116 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1117 | return -EBUSY; | |
1118 | } | |
1119 | ||
1120 | if (is_hmac(crypto_ahash_tfm(rtfm))) | |
1121 | params.opad_needed = 1; | |
1122 | else | |
1123 | params.opad_needed = 0; | |
1124 | ||
1125 | params.last = 0; | |
1126 | params.more = 0; | |
1127 | params.sg_len = req->nbytes; | |
1128 | params.bfr_len = 0; | |
1129 | params.scmd1 = 0; | |
1130 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); | |
1131 | req_ctx->result = 1; | |
1132 | req_ctx->data_len += params.bfr_len + params.sg_len; | |
1133 | ||
44fce12a HJ |
1134 | if (req->nbytes == 0) { |
1135 | create_last_hash_block(req_ctx->reqbfr, bs, 0); | |
324429d7 HS |
1136 | params.more = 1; |
1137 | params.bfr_len = bs; | |
1138 | } | |
1139 | ||
358961d1 | 1140 | skb = create_hash_wr(req, ¶ms); |
324429d7 HS |
1141 | if (!skb) |
1142 | return -ENOMEM; | |
1143 | ||
1144 | skb->dev = u_ctx->lldi.ports[0]; | |
72a56ca9 | 1145 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); |
324429d7 HS |
1146 | chcr_send_wr(skb); |
1147 | return -EINPROGRESS; | |
1148 | } | |
1149 | ||
1150 | static int chcr_ahash_export(struct ahash_request *areq, void *out) | |
1151 | { | |
1152 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
1153 | struct chcr_ahash_req_ctx *state = out; | |
1154 | ||
44fce12a | 1155 | state->reqlen = req_ctx->reqlen; |
324429d7 | 1156 | state->data_len = req_ctx->data_len; |
44fce12a | 1157 | memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); |
324429d7 HS |
1158 | memcpy(state->partial_hash, req_ctx->partial_hash, |
1159 | CHCR_HASH_MAX_DIGEST_SIZE); | |
44fce12a | 1160 | return 0; |
324429d7 HS |
1161 | } |
1162 | ||
1163 | static int chcr_ahash_import(struct ahash_request *areq, const void *in) | |
1164 | { | |
1165 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
1166 | struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in; | |
1167 | ||
44fce12a | 1168 | req_ctx->reqlen = state->reqlen; |
324429d7 | 1169 | req_ctx->data_len = state->data_len; |
44fce12a HJ |
1170 | req_ctx->reqbfr = req_ctx->bfr1; |
1171 | req_ctx->skbfr = req_ctx->bfr2; | |
1172 | memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); | |
324429d7 HS |
1173 | memcpy(req_ctx->partial_hash, state->partial_hash, |
1174 | CHCR_HASH_MAX_DIGEST_SIZE); | |
1175 | return 0; | |
1176 | } | |
1177 | ||
1178 | static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | |
1179 | unsigned int keylen) | |
1180 | { | |
1181 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | |
1182 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | |
1183 | unsigned int digestsize = crypto_ahash_digestsize(tfm); | |
1184 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | |
1185 | unsigned int i, err = 0, updated_digestsize; | |
1186 | ||
e7922729 HJ |
1187 | SHASH_DESC_ON_STACK(shash, hmacctx->base_hash); |
1188 | ||
 1189 | /* use the key to calculate the ipad and opad. ipad will be sent with the |
324429d7 HS |
1190 | * first request's data. opad will be sent with the final hash result |
1191 | * ipad in hmacctx->ipad and opad in hmacctx->opad location | |
1192 | */ | |
e7922729 HJ |
1193 | shash->tfm = hmacctx->base_hash; |
1194 | shash->flags = crypto_shash_get_flags(hmacctx->base_hash); | |
324429d7 | 1195 | if (keylen > bs) { |
e7922729 | 1196 | err = crypto_shash_digest(shash, key, keylen, |
324429d7 HS |
1197 | hmacctx->ipad); |
1198 | if (err) | |
1199 | goto out; | |
1200 | keylen = digestsize; | |
1201 | } else { | |
1202 | memcpy(hmacctx->ipad, key, keylen); | |
1203 | } | |
1204 | memset(hmacctx->ipad + keylen, 0, bs - keylen); | |
1205 | memcpy(hmacctx->opad, hmacctx->ipad, bs); | |
1206 | ||
1207 | for (i = 0; i < bs / sizeof(int); i++) { | |
1208 | *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA; | |
1209 | *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA; | |
1210 | } | |
1211 | ||
1212 | updated_digestsize = digestsize; | |
1213 | if (digestsize == SHA224_DIGEST_SIZE) | |
1214 | updated_digestsize = SHA256_DIGEST_SIZE; | |
1215 | else if (digestsize == SHA384_DIGEST_SIZE) | |
1216 | updated_digestsize = SHA512_DIGEST_SIZE; | |
e7922729 | 1217 | err = chcr_compute_partial_hash(shash, hmacctx->ipad, |
324429d7 HS |
1218 | hmacctx->ipad, digestsize); |
1219 | if (err) | |
1220 | goto out; | |
1221 | chcr_change_order(hmacctx->ipad, updated_digestsize); | |
1222 | ||
e7922729 | 1223 | err = chcr_compute_partial_hash(shash, hmacctx->opad, |
324429d7 HS |
1224 | hmacctx->opad, digestsize); |
1225 | if (err) | |
1226 | goto out; | |
1227 | chcr_change_order(hmacctx->opad, updated_digestsize); | |
1228 | out: | |
1229 | return err; | |
1230 | } | |
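/*
 * Added note (not in the original source): the setkey path above follows the
 * usual HMAC construction (RFC 2104): a key longer than one block is first
 * hashed down to digestsize, then zero-padded to the block size and XORed
 * with the repeating ipad/opad patterns (IPAD_DATA/OPAD_DATA are assumed to
 * encode the conventional 0x36/0x5c bytes). The two one-block partial hashes
 * are precomputed and byte-swapped so they can be loaded directly into the
 * hardware key context for subsequent requests.
 */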
1231 | ||
1232 | static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |
1233 | unsigned int key_len) | |
1234 | { | |
1235 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | |
1236 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | |
324429d7 HS |
1237 | unsigned short context_size = 0; |
1238 | ||
cc1b156d HJ |
1239 | if ((key_len != (AES_KEYSIZE_128 << 1)) && |
1240 | (key_len != (AES_KEYSIZE_256 << 1))) { | |
324429d7 HS |
1241 | crypto_tfm_set_flags((struct crypto_tfm *)tfm, |
1242 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
1243 | ablkctx->enckey_len = 0; | |
cc1b156d HJ |
1244 | return -EINVAL; |
1245 | ||
324429d7 | 1246 | } |
cc1b156d HJ |
1247 | |
1248 | memcpy(ablkctx->key, key, key_len); | |
1249 | ablkctx->enckey_len = key_len; | |
1250 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2); | |
1251 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; | |
1252 | ablkctx->key_ctx_hdr = | |
1253 | FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ? | |
1254 | CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : | |
1255 | CHCR_KEYCTX_CIPHER_KEY_SIZE_256, | |
1256 | CHCR_KEYCTX_NO_KEY, 1, | |
1257 | 0, context_size); | |
1258 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; | |
1259 | return 0; | |
324429d7 HS |
1260 | } |
1261 | ||
1262 | static int chcr_sha_init(struct ahash_request *areq) | |
1263 | { | |
1264 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
1265 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | |
1266 | int digestsize = crypto_ahash_digestsize(tfm); | |
1267 | ||
1268 | req_ctx->data_len = 0; | |
44fce12a HJ |
1269 | req_ctx->reqlen = 0; |
1270 | req_ctx->reqbfr = req_ctx->bfr1; | |
1271 | req_ctx->skbfr = req_ctx->bfr2; | |
324429d7 HS |
1272 | req_ctx->skb = NULL; |
1273 | req_ctx->result = 0; | |
1274 | copy_hash_init_values(req_ctx->partial_hash, digestsize); | |
1275 | return 0; | |
1276 | } | |
1277 | ||
1278 | static int chcr_sha_cra_init(struct crypto_tfm *tfm) | |
1279 | { | |
1280 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
1281 | sizeof(struct chcr_ahash_req_ctx)); | |
1282 | return chcr_device_init(crypto_tfm_ctx(tfm)); | |
1283 | } | |
1284 | ||
1285 | static int chcr_hmac_init(struct ahash_request *areq) | |
1286 | { | |
1287 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
1288 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq); | |
1289 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); | |
1290 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | |
1291 | unsigned int digestsize = crypto_ahash_digestsize(rtfm); | |
1292 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1293 | ||
1294 | chcr_sha_init(areq); | |
1295 | req_ctx->data_len = bs; | |
1296 | if (is_hmac(crypto_ahash_tfm(rtfm))) { | |
1297 | if (digestsize == SHA224_DIGEST_SIZE) | |
1298 | memcpy(req_ctx->partial_hash, hmacctx->ipad, | |
1299 | SHA256_DIGEST_SIZE); | |
1300 | else if (digestsize == SHA384_DIGEST_SIZE) | |
1301 | memcpy(req_ctx->partial_hash, hmacctx->ipad, | |
1302 | SHA512_DIGEST_SIZE); | |
1303 | else | |
1304 | memcpy(req_ctx->partial_hash, hmacctx->ipad, | |
1305 | digestsize); | |
1306 | } | |
1307 | return 0; | |
1308 | } | |
1309 | ||
1310 | static int chcr_hmac_cra_init(struct crypto_tfm *tfm) | |
1311 | { | |
1312 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
1313 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | |
1314 | unsigned int digestsize = | |
1315 | crypto_ahash_digestsize(__crypto_ahash_cast(tfm)); | |
1316 | ||
1317 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
1318 | sizeof(struct chcr_ahash_req_ctx)); | |
e7922729 HJ |
1319 | hmacctx->base_hash = chcr_alloc_shash(digestsize); |
1320 | if (IS_ERR(hmacctx->base_hash)) | |
1321 | return PTR_ERR(hmacctx->base_hash); | |
324429d7 HS |
1322 | return chcr_device_init(crypto_tfm_ctx(tfm)); |
1323 | } | |
1324 | ||
324429d7 HS |
1325 | static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) |
1326 | { | |
1327 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
1328 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | |
1329 | ||
e7922729 HJ |
1330 | if (hmacctx->base_hash) { |
1331 | chcr_free_shash(hmacctx->base_hash); | |
1332 | hmacctx->base_hash = NULL; | |
324429d7 HS |
1333 | } |
1334 | } | |
1335 | ||
2debd332 HJ |
1336 | static int chcr_copy_assoc(struct aead_request *req, |
1337 | struct chcr_aead_ctx *ctx) | |
1338 | { | |
1339 | SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); | |
1340 | ||
1341 | skcipher_request_set_tfm(skreq, ctx->null); | |
1342 | skcipher_request_set_callback(skreq, aead_request_flags(req), | |
1343 | NULL, NULL); | |
1344 | skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen, | |
1345 | NULL); | |
1346 | ||
1347 | return crypto_skcipher_encrypt(skreq); | |
1348 | } | |
0e93708d HJ |
1349 | static int chcr_aead_need_fallback(struct aead_request *req, int src_nent, |
1350 | int aadmax, int wrlen, | |
1351 | unsigned short op_type) | |
1352 | { | |
1353 | unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); | |
1354 | ||
1355 | if (((req->cryptlen - (op_type ? authsize : 0)) == 0) || | |
1356 | (req->assoclen > aadmax) || | |
1357 | (src_nent > MAX_SKB_FRAGS) || | |
1358 | (wrlen > MAX_WR_SIZE)) | |
1359 | return 1; | |
1360 | return 0; | |
1361 | } | |
2debd332 | 1362 | |
0e93708d HJ |
1363 | static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) |
1364 | { | |
1365 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
1366 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | |
1367 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
1368 | struct aead_request *subreq = aead_request_ctx(req); | |
1369 | ||
1370 | aead_request_set_tfm(subreq, aeadctx->sw_cipher); | |
1371 | aead_request_set_callback(subreq, req->base.flags, | |
1372 | req->base.complete, req->base.data); | |
1373 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, | |
1374 | req->iv); | |
1375 | aead_request_set_ad(subreq, req->assoclen); | |
1376 | return op_type ? crypto_aead_decrypt(subreq) : | |
1377 | crypto_aead_encrypt(subreq); | |
1378 | } | |
2debd332 HJ |
1379 | |
1380 | static struct sk_buff *create_authenc_wr(struct aead_request *req, | |
1381 | unsigned short qid, | |
1382 | int size, | |
1383 | unsigned short op_type) | |
1384 | { | |
1385 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
1386 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | |
1387 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | |
1388 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
1389 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); | |
1390 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
1391 | struct sk_buff *skb = NULL; | |
1392 | struct chcr_wr *chcr_req; | |
1393 | struct cpl_rx_phys_dsgl *phys_cpl; | |
1394 | struct phys_sge_parm sg_param; | |
94e1dab1 | 1395 | struct scatterlist *src; |
2debd332 HJ |
1396 | unsigned int frags = 0, transhdr_len; |
1397 | unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; | |
1398 | unsigned int kctx_len = 0; | |
1399 | unsigned short stop_offset = 0; | |
1400 | unsigned int assoclen = req->assoclen; | |
1401 | unsigned int authsize = crypto_aead_authsize(tfm); | |
5fe8c711 | 1402 | int error = -EINVAL, src_nent; |
2debd332 HJ |
1403 | int null = 0; |
1404 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | |
1405 | GFP_ATOMIC; | |
1406 | ||
1407 | if (aeadctx->enckey_len == 0 || (req->cryptlen == 0)) | |
1408 | goto err; | |
1409 | ||
1410 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | |
1411 | goto err; | |
0e93708d HJ |
1412 | src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); |
1413 | if (src_nent < 0) | |
2debd332 | 1414 | goto err; |
94e1dab1 HJ |
1415 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); |
1416 | reqctx->dst = src; | |
1417 | ||
2debd332 | 1418 | if (req->src != req->dst) { |
5fe8c711 HJ |
1419 | error = chcr_copy_assoc(req, aeadctx); |
1420 | if (error) | |
1421 | return ERR_PTR(error); | |
94e1dab1 HJ |
1422 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, |
1423 | req->assoclen); | |
2debd332 HJ |
1424 | } |
1425 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { | |
1426 | null = 1; | |
1427 | assoclen = 0; | |
1428 | } | |
94e1dab1 | 1429 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + |
2debd332 | 1430 | (op_type ? -authsize : authsize)); |
0e93708d | 1431 | if (reqctx->dst_nents < 0) { |
2debd332 | 1432 | pr_err("AUTHENC:Invalid Destination sg entries\n"); |
5fe8c711 | 1433 | error = -EINVAL; |
2debd332 HJ |
1434 | goto err; |
1435 | } | |
1436 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | |
1437 | kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) | |
1438 | - sizeof(chcr_req->key_ctx); | |
1439 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | |
0e93708d HJ |
1440 | if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG, |
1441 | T6_MAX_AAD_SIZE, | |
1442 | transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8), | |
1443 | op_type)) { | |
1444 | return ERR_PTR(chcr_aead_fallback(req, op_type)); | |
1445 | } | |
2debd332 | 1446 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
5fe8c711 HJ |
1447 | if (!skb) { |
1448 | error = -ENOMEM; | |
2debd332 | 1449 | goto err; |
5fe8c711 | 1450 | } |
2debd332 HJ |
1451 | |
1452 | /* LLD is going to write the sge hdr. */ | |
1453 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | |
1454 | ||
1455 | /* Write WR */ | |
1456 | chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len); | |
1457 | memset(chcr_req, 0, transhdr_len); | |
1458 | ||
1459 | stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; | |
1460 | ||
1461 | /* | |
1462 | * Input order is AAD, IV and Payload, where the IV should be included |
1463 | * as part of the authdata. All other fields should be filled according |
1464 | * to the hardware spec. |
1465 | */ | |
1466 | chcr_req->sec_cpl.op_ivinsrtofst = | |
8a13449f | 1467 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, |
2debd332 HJ |
1468 | (ivsize ? (assoclen + 1) : 0)); |
1469 | chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen); | |
1470 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | |
1471 | assoclen ? 1 : 0, assoclen, | |
1472 | assoclen + ivsize + 1, | |
1473 | (stop_offset & 0x1F0) >> 4); | |
1474 | chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( | |
1475 | stop_offset & 0xF, | |
1476 | null ? 0 : assoclen + ivsize + 1, | |
1477 | stop_offset, stop_offset); | |
1478 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, | |
1479 | (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, | |
1480 | CHCR_SCMD_CIPHER_MODE_AES_CBC, | |
1481 | actx->auth_mode, aeadctx->hmac_ctrl, | |
1482 | ivsize >> 1); | |
1483 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, | |
1484 | 0, 1, dst_size); | |
1485 | ||
1486 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | |
1487 | if (op_type == CHCR_ENCRYPT_OP) | |
1488 | memcpy(chcr_req->key_ctx.key, aeadctx->key, | |
1489 | aeadctx->enckey_len); | |
1490 | else | |
1491 | memcpy(chcr_req->key_ctx.key, actx->dec_rrkey, | |
1492 | aeadctx->enckey_len); | |
1493 | ||
1494 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << | |
1495 | 4), actx->h_iopad, kctx_len - | |
1496 | (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4)); | |
1497 | ||
1498 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | |
1499 | sg_param.nents = reqctx->dst_nents; | |
1500 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | |
1501 | sg_param.qid = qid; | |
5fe8c711 HJ |
1502 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, |
1503 | reqctx->dst, &sg_param); | |
1504 | if (error) | |
2debd332 HJ |
1505 | goto dstmap_fail; |
1506 | ||
1507 | skb_set_transport_header(skb, transhdr_len); | |
1508 | ||
1509 | if (assoclen) { | |
1510 | /* AAD buffer in */ | |
1511 | write_sg_to_skb(skb, &frags, req->src, assoclen); | |
1512 | ||
1513 | } | |
1514 | write_buffer_to_skb(skb, &frags, req->iv, ivsize); | |
1515 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | |
1516 | create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, | |
2512a624 | 1517 | sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0); |
2debd332 HJ |
1518 | reqctx->skb = skb; |
1519 | skb_get(skb); | |
1520 | ||
1521 | return skb; | |
1522 | dstmap_fail: | |
1523 | /* ivmap_fail: */ | |
1524 | kfree_skb(skb); | |
1525 | err: | |
5fe8c711 | 1526 | return ERR_PTR(error); |
2debd332 HJ |
1527 | } |
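/*
 * Note on the key context built above: it is laid out as
 *   key_ctx_hdr | AES key (padded to a 16-byte multiple) | h_iopad
 * where h_iopad holds the ipad/opad partial digests precomputed in
 * chcr_authenc_setkey().  On decrypt the reverse-round key (dec_rrkey) is
 * copied in place of the forward AES key, which is why both variants are
 * kept in the context.
 */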
1528 | ||
2debd332 HJ |
1529 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) |
1530 | { | |
1531 | __be32 data; | |
1532 | ||
1533 | memset(block, 0, csize); | |
1534 | block += csize; | |
1535 | ||
1536 | if (csize >= 4) | |
1537 | csize = 4; | |
1538 | else if (msglen > (unsigned int)(1 << (8 * csize))) | |
1539 | return -EOVERFLOW; | |
1540 | ||
1541 | data = cpu_to_be32(msglen); | |
1542 | memcpy(block - csize, (u8 *)&data + 4 - csize, csize); | |
1543 | ||
1544 | return 0; | |
1545 | } | |
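/*
 * Worked example for set_msg_len(): the message length is written
 * big-endian into the last bytes of a csize-wide field, as in RFC 3610.
 * With csize = 4 and msglen = 4096 the field becomes 00 00 10 00; with
 * csize = 2 and msglen = 300 it becomes 01 2c.  Only for csize < 4 can
 * msglen overflow the field, which is the -EOVERFLOW case.
 */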
1546 | ||
1547 | static void generate_b0(struct aead_request *req, | |
1548 | struct chcr_aead_ctx *aeadctx, | |
1549 | unsigned short op_type) | |
1550 | { | |
1551 | unsigned int l, lp, m; | |
1552 | int rc; | |
1553 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | |
1554 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
1555 | u8 *b0 = reqctx->scratch_pad; | |
1556 | ||
1557 | m = crypto_aead_authsize(aead); | |
1558 | ||
1559 | memcpy(b0, reqctx->iv, 16); | |
1560 | ||
1561 | lp = b0[0]; | |
1562 | l = lp + 1; | |
1563 | ||
1564 | /* set m, bits 3-5 */ | |
1565 | *b0 |= (8 * ((m - 2) / 2)); | |
1566 | ||
1567 | /* set adata, bit 6, if associated data is used */ | |
1568 | if (req->assoclen) | |
1569 | *b0 |= 64; | |
1570 | rc = set_msg_len(b0 + 16 - l, | |
1571 | (op_type == CHCR_DECRYPT_OP) ? | |
1572 | req->cryptlen - m : req->cryptlen, l); | |
1573 | } | |
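/*
 * Worked example for generate_b0(): B0 is the first CCM block of RFC 3610.
 * Its flags byte combines L' (low 3 bits, already present in iv[0]), the
 * encoded tag length M' = (m - 2) / 2 in bits 3-5, and the Adata bit
 * (0x40) when AAD is present.  With iv[0] = 3 (4-byte length field), a
 * 16-byte tag and non-empty AAD this gives 0x03 | 0x38 | 0x40 = 0x7b.
 * The last l bytes of B0 carry the message length from set_msg_len().
 */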
1574 | ||
1575 | static inline int crypto_ccm_check_iv(const u8 *iv) | |
1576 | { | |
1577 | /* 2 <= L <= 8, so 1 <= L' <= 7. */ | |
1578 | if (iv[0] < 1 || iv[0] > 7) | |
1579 | return -EINVAL; | |
1580 | ||
1581 | return 0; | |
1582 | } | |
1583 | ||
1584 | static int ccm_format_packet(struct aead_request *req, | |
1585 | struct chcr_aead_ctx *aeadctx, | |
1586 | unsigned int sub_type, | |
1587 | unsigned short op_type) | |
1588 | { | |
1589 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
1590 | int rc = 0; | |
1591 | ||
2debd332 HJ |
1592 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { |
1593 | reqctx->iv[0] = 3; | |
1594 | memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3); | |
1595 | memcpy(reqctx->iv + 4, req->iv, 8); | |
1596 | memset(reqctx->iv + 12, 0, 4); | |
1597 | *((unsigned short *)(reqctx->scratch_pad + 16)) = | |
1598 | htons(req->assoclen - 8); | |
1599 | } else { | |
1600 | memcpy(reqctx->iv, req->iv, 16); | |
1601 | *((unsigned short *)(reqctx->scratch_pad + 16)) = | |
1602 | htons(req->assoclen); | |
1603 | } | |
1604 | generate_b0(req, aeadctx, op_type); | |
1605 | /* zero the ctr value */ | |
1606 | memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1); | |
1607 | return rc; | |
1608 | } | |
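/*
 * Note on ccm_format_packet(): for rfc4309(ccm(aes)) the 16-byte nonce
 * block is rebuilt as
 *   iv[0]      = 3        (L' = 3, i.e. a 4-byte length field)
 *   iv[1..3]   = salt     (the last 3 key bytes saved at setkey time)
 *   iv[4..11]  = req->iv  (the 8-byte per-packet IV)
 *   iv[12..15] = 0        (counter space)
 * and the 16-bit big-endian AAD length is staged right after B0 in the
 * scratch pad, matching the RFC 3610 a_len encoding for short AAD.  The
 * final memset clears the counter bytes (the last L' + 1 bytes) for both
 * the plain CCM and the rfc4309 case.
 */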
1609 | ||
1610 | static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, | |
1611 | unsigned int dst_size, | |
1612 | struct aead_request *req, | |
1613 | unsigned short op_type, | |
1614 | struct chcr_context *chcrctx) | |
1615 | { | |
1616 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
0a7bd30c | 1617 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); |
2debd332 HJ |
1618 | unsigned int ivsize = AES_BLOCK_SIZE; |
1619 | unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; | |
1620 | unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; | |
8a13449f | 1621 | unsigned int c_id = chcrctx->dev->rx_channel_id; |
2debd332 HJ |
1622 | unsigned int ccm_xtra; |
1623 | unsigned char tag_offset = 0, auth_offset = 0; | |
2debd332 HJ |
1624 | unsigned int assoclen; |
1625 | ||
1626 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) | |
1627 | assoclen = req->assoclen - 8; | |
1628 | else | |
1629 | assoclen = req->assoclen; | |
1630 | ccm_xtra = CCM_B0_SIZE + | |
1631 | ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); | |
1632 | ||
1633 | auth_offset = req->cryptlen ? | |
1634 | (assoclen + ivsize + 1 + ccm_xtra) : 0; | |
1635 | if (op_type == CHCR_DECRYPT_OP) { | |
1636 | if (crypto_aead_authsize(tfm) != req->cryptlen) | |
1637 | tag_offset = crypto_aead_authsize(tfm); | |
1638 | else | |
1639 | auth_offset = 0; | |
1640 | } | |
1641 | ||
1642 | ||
1643 | sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id, | |
1644 | 2, (ivsize ? (assoclen + 1) : 0) + | |
1645 | ccm_xtra); | |
1646 | sec_cpl->pldlen = | |
1647 | htonl(assoclen + ivsize + req->cryptlen + ccm_xtra); | |
1648 | /* For CCM there will be b0 always. So AAD start will be 1 always */ |
1649 | sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | |
1650 | 1, assoclen + ccm_xtra, assoclen | |
1651 | + ivsize + 1 + ccm_xtra, 0); | |
1652 | ||
1653 | sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, | |
1654 | auth_offset, tag_offset, | |
1655 | (op_type == CHCR_ENCRYPT_OP) ? 0 : | |
1656 | crypto_aead_authsize(tfm)); | |
1657 | sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, | |
1658 | (op_type == CHCR_ENCRYPT_OP) ? 0 : 1, | |
0a7bd30c HJ |
1659 | cipher_mode, mac_mode, |
1660 | aeadctx->hmac_ctrl, ivsize >> 1); | |
2debd332 HJ |
1661 | |
1662 | sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, | |
1663 | 1, dst_size); | |
1664 | } | |
1665 | ||
1666 | int aead_ccm_validate_input(unsigned short op_type, | |
1667 | struct aead_request *req, | |
1668 | struct chcr_aead_ctx *aeadctx, | |
1669 | unsigned int sub_type) | |
1670 | { | |
1671 | if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { | |
1672 | if (crypto_ccm_check_iv(req->iv)) { | |
1673 | pr_err("CCM: IV check fails\n"); | |
1674 | return -EINVAL; | |
1675 | } | |
1676 | } else { | |
1677 | if (req->assoclen != 16 && req->assoclen != 20) { | |
1678 | pr_err("RFC4309: Invalid AAD length %d\n", | |
1679 | req->assoclen); | |
1680 | return -EINVAL; | |
1681 | } | |
1682 | } | |
1683 | if (aeadctx->enckey_len == 0) { | |
1684 | pr_err("CCM: Encryption key not set\n"); | |
1685 | return -EINVAL; | |
1686 | } | |
1687 | return 0; | |
1688 | } | |
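/*
 * Note on the RFC 4309 check above: with ESP the AAD handed down is the
 * SPI plus a 32-bit or 64-bit (extended) sequence number followed by the
 * 8-byte IV, i.e. 16 or 20 bytes in total; fill_sec_cpl_for_aead() later
 * drops those trailing 8 IV bytes from assoclen before programming the
 * hardware.
 */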
1689 | ||
1690 | unsigned int fill_aead_req_fields(struct sk_buff *skb, | |
1691 | struct aead_request *req, | |
1692 | struct scatterlist *src, | |
1693 | unsigned int ivsize, | |
1694 | struct chcr_aead_ctx *aeadctx) | |
1695 | { | |
1696 | unsigned int frags = 0; | |
1697 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
1698 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
1699 | /* b0 and AAD length (if available) */ |
1700 | ||
1701 | write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE + | |
1702 | (req->assoclen ? CCM_AAD_FIELD_SIZE : 0)); | |
1703 | if (req->assoclen) { | |
1704 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) | |
1705 | write_sg_to_skb(skb, &frags, req->src, | |
1706 | req->assoclen - 8); | |
1707 | else | |
1708 | write_sg_to_skb(skb, &frags, req->src, req->assoclen); | |
1709 | } | |
1710 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); | |
1711 | if (req->cryptlen) | |
1712 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | |
1713 | ||
1714 | return frags; | |
1715 | } | |
1716 | ||
1717 | static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |
1718 | unsigned short qid, | |
1719 | int size, | |
1720 | unsigned short op_type) | |
1721 | { | |
1722 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
1723 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | |
1724 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | |
1725 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
1726 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
1727 | struct sk_buff *skb = NULL; | |
1728 | struct chcr_wr *chcr_req; | |
1729 | struct cpl_rx_phys_dsgl *phys_cpl; | |
1730 | struct phys_sge_parm sg_param; | |
94e1dab1 | 1731 | struct scatterlist *src; |
2debd332 HJ |
1732 | unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; |
1733 | unsigned int dst_size = 0, kctx_len; | |
1734 | unsigned int sub_type; | |
1735 | unsigned int authsize = crypto_aead_authsize(tfm); | |
5fe8c711 | 1736 | int error = -EINVAL, src_nent; |
2debd332 HJ |
1737 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1738 | GFP_ATOMIC; | |
1739 | ||
1740 | ||
1741 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | |
1742 | goto err; | |
0e93708d HJ |
1743 | src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); |
1744 | if (src_nent < 0) | |
2debd332 | 1745 | goto err; |
0e93708d | 1746 | |
2debd332 | 1747 | sub_type = get_aead_subtype(tfm); |
94e1dab1 HJ |
1748 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); |
1749 | reqctx->dst = src; | |
1750 | ||
2debd332 | 1751 | if (req->src != req->dst) { |
5fe8c711 HJ |
1752 | error = chcr_copy_assoc(req, aeadctx); |
1753 | if (error) { | |
2debd332 | 1754 | pr_err("AAD copy to destination buffer fails\n"); |
5fe8c711 | 1755 | return ERR_PTR(error); |
2debd332 | 1756 | } |
94e1dab1 HJ |
1757 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, |
1758 | req->assoclen); | |
2debd332 | 1759 | } |
94e1dab1 | 1760 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + |
2debd332 | 1761 | (op_type ? -authsize : authsize)); |
0e93708d | 1762 | if (reqctx->dst_nents < 0) { |
2debd332 | 1763 | pr_err("CCM:Invalid Destination sg entries\n"); |
5fe8c711 | 1764 | error = -EINVAL; |
2debd332 HJ |
1765 | goto err; |
1766 | } | |
5fe8c711 HJ |
1767 | error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); |
1768 | if (error) | |
2debd332 HJ |
1769 | goto err; |
1770 | ||
1771 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | |
1772 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; | |
1773 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | |
0e93708d HJ |
1774 | if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG, |
1775 | T6_MAX_AAD_SIZE - 18, | |
1776 | transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8), | |
1777 | op_type)) { | |
1778 | return ERR_PTR(chcr_aead_fallback(req, op_type)); | |
1779 | } | |
1780 | ||
2debd332 HJ |
1781 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
1782 | ||
5fe8c711 HJ |
1783 | if (!skb) { |
1784 | error = -ENOMEM; | |
2debd332 | 1785 | goto err; |
5fe8c711 | 1786 | } |
2debd332 HJ |
1787 | |
1788 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | |
1789 | ||
1790 | chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len); | |
1791 | memset(chcr_req, 0, transhdr_len); | |
1792 | ||
1793 | fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx); | |
1794 | ||
1795 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | |
1796 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); | |
1797 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * | |
1798 | 16), aeadctx->key, aeadctx->enckey_len); | |
1799 | ||
1800 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | |
5fe8c711 HJ |
1801 | error = ccm_format_packet(req, aeadctx, sub_type, op_type); |
1802 | if (error) | |
2debd332 HJ |
1803 | goto dstmap_fail; |
1804 | ||
1805 | sg_param.nents = reqctx->dst_nents; | |
1806 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | |
1807 | sg_param.qid = qid; | |
5fe8c711 HJ |
1808 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, |
1809 | reqctx->dst, &sg_param); | |
1810 | if (error) | |
2debd332 HJ |
1811 | goto dstmap_fail; |
1812 | ||
1813 | skb_set_transport_header(skb, transhdr_len); | |
1814 | frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx); | |
1815 | create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1, | |
2512a624 | 1816 | sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0); |
2debd332 HJ |
1817 | reqctx->skb = skb; |
1818 | skb_get(skb); | |
1819 | return skb; | |
1820 | dstmap_fail: | |
1821 | kfree_skb(skb); | |
2debd332 | 1822 | err: |
5fe8c711 | 1823 | return ERR_PTR(error); |
2debd332 HJ |
1824 | } |
1825 | ||
1826 | static struct sk_buff *create_gcm_wr(struct aead_request *req, | |
1827 | unsigned short qid, | |
1828 | int size, | |
1829 | unsigned short op_type) | |
1830 | { | |
1831 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
1832 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | |
1833 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | |
1834 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
1835 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
1836 | struct sk_buff *skb = NULL; | |
1837 | struct chcr_wr *chcr_req; | |
1838 | struct cpl_rx_phys_dsgl *phys_cpl; | |
1839 | struct phys_sge_parm sg_param; | |
94e1dab1 | 1840 | struct scatterlist *src; |
2debd332 HJ |
1841 | unsigned int frags = 0, transhdr_len; |
1842 | unsigned int ivsize = AES_BLOCK_SIZE; | |
d600fc8a | 1843 | unsigned int dst_size = 0, kctx_len, assoclen = req->assoclen; |
2debd332 | 1844 | unsigned char tag_offset = 0; |
2debd332 | 1845 | unsigned int authsize = crypto_aead_authsize(tfm); |
5fe8c711 | 1846 | int error = -EINVAL, src_nent; |
2debd332 HJ |
1847 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1848 | GFP_ATOMIC; | |
1849 | ||
1850 | /* validate key size */ | |
1851 | if (aeadctx->enckey_len == 0) | |
1852 | goto err; | |
1853 | ||
1854 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | |
1855 | goto err; | |
d600fc8a | 1856 | src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen); |
0e93708d | 1857 | if (src_nent < 0) |
2debd332 HJ |
1858 | goto err; |
1859 | ||
d600fc8a | 1860 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen); |
94e1dab1 | 1861 | reqctx->dst = src; |
2debd332 | 1862 | if (req->src != req->dst) { |
5fe8c711 HJ |
1863 | error = chcr_copy_assoc(req, aeadctx); |
1864 | if (error) | |
1865 | return ERR_PTR(error); | |
94e1dab1 | 1866 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, |
d600fc8a | 1867 | assoclen); |
2debd332 HJ |
1868 | } |
1869 | ||
d600fc8a | 1870 | |
94e1dab1 | 1871 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + |
2debd332 | 1872 | (op_type ? -authsize : authsize)); |
0e93708d | 1873 | if (reqctx->dst_nents < 0) { |
2debd332 | 1874 | pr_err("GCM:Invalid Destination sg entries\n"); |
5fe8c711 | 1875 | error = -EINVAL; |
2debd332 HJ |
1876 | goto err; |
1877 | } | |
1878 | ||
1879 | ||
1880 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | |
1881 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + | |
1882 | AEAD_H_SIZE; | |
1883 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | |
0e93708d HJ |
1884 | if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG, |
1885 | T6_MAX_AAD_SIZE, | |
1886 | transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8), | |
1887 | op_type)) { | |
1888 | return ERR_PTR(chcr_aead_fallback(req, op_type)); | |
1889 | } | |
2debd332 | 1890 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
5fe8c711 HJ |
1891 | if (!skb) { |
1892 | error = -ENOMEM; | |
2debd332 | 1893 | goto err; |
5fe8c711 | 1894 | } |
2debd332 HJ |
1895 | |
1896 | /* NIC driver is going to write the sge hdr. */ | |
1897 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | |
1898 | ||
1899 | chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len); | |
1900 | memset(chcr_req, 0, transhdr_len); | |
1901 | ||
1902 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) | |
d600fc8a | 1903 | assoclen = req->assoclen - 8; |
2debd332 HJ |
1904 | |
1905 | tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; | |
1906 | chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( | |
8a13449f | 1907 | ctx->dev->rx_channel_id, 2, (ivsize ? |
d600fc8a | 1908 | (assoclen + 1) : 0)); |
0e93708d | 1909 | chcr_req->sec_cpl.pldlen = |
d600fc8a | 1910 | htonl(assoclen + ivsize + req->cryptlen); |
2debd332 | 1911 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
d600fc8a HJ |
1912 | assoclen ? 1 : 0, assoclen, |
1913 | assoclen + ivsize + 1, 0); | |
2debd332 | 1914 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
d600fc8a | 1915 | FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1, |
2debd332 HJ |
1916 | tag_offset, tag_offset); |
1917 | chcr_req->sec_cpl.seqno_numivs = | |
1918 | FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == | |
1919 | CHCR_ENCRYPT_OP) ? 1 : 0, | |
1920 | CHCR_SCMD_CIPHER_MODE_AES_GCM, | |
0a7bd30c HJ |
1921 | CHCR_SCMD_AUTH_MODE_GHASH, |
1922 | aeadctx->hmac_ctrl, ivsize >> 1); | |
2debd332 HJ |
1923 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, |
1924 | 0, 1, dst_size); | |
1925 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | |
1926 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); | |
1927 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * | |
1928 | 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); | |
1929 | ||
1930 | /* prepare a 16 byte iv */ | |
1931 | /* S A L T | IV | 0x00000001 */ | |
1932 | if (get_aead_subtype(tfm) == | |
1933 | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { | |
1934 | memcpy(reqctx->iv, aeadctx->salt, 4); | |
1935 | memcpy(reqctx->iv + 4, req->iv, 8); | |
1936 | } else { | |
1937 | memcpy(reqctx->iv, req->iv, 12); | |
1938 | } | |
1939 | *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01); | |
1940 | ||
1941 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | |
1942 | sg_param.nents = reqctx->dst_nents; | |
1943 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | |
1944 | sg_param.qid = qid; | |
5fe8c711 HJ |
1945 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, |
1946 | reqctx->dst, &sg_param); | |
1947 | if (error) | |
2debd332 HJ |
1948 | goto dstmap_fail; |
1949 | ||
1950 | skb_set_transport_header(skb, transhdr_len); | |
d600fc8a | 1951 | write_sg_to_skb(skb, &frags, req->src, assoclen); |
2debd332 | 1952 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); |
0e93708d | 1953 | write_sg_to_skb(skb, &frags, src, req->cryptlen); |
2debd332 | 1954 | create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, |
2512a624 HJ |
1955 | sizeof(struct cpl_rx_phys_dsgl) + dst_size, |
1956 | reqctx->verify); | |
2debd332 HJ |
1957 | reqctx->skb = skb; |
1958 | skb_get(skb); | |
1959 | return skb; | |
1960 | ||
1961 | dstmap_fail: | |
1962 | /* ivmap_fail: */ | |
1963 | kfree_skb(skb); | |
2debd332 | 1964 | err: |
5fe8c711 | 1965 | return ERR_PTR(error); |
2debd332 HJ |
1966 | } |
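/*
 * Note on the IV prepared above: the 16-byte block is GCM's pre-counter
 * block J0 for a 96-bit IV, i.e. IV || 0x00000001.  For rfc4106(gcm(aes))
 * the 96-bit IV is the 4-byte salt taken from the key followed by the
 * 8-byte explicit IV carried with the request; for plain gcm(aes) the
 * caller supplies all 12 bytes directly.
 */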
1967 | ||
1968 | ||
1969 | ||
1970 | static int chcr_aead_cra_init(struct crypto_aead *tfm) | |
1971 | { | |
1972 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | |
1973 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
0e93708d HJ |
1974 | struct aead_alg *alg = crypto_aead_alg(tfm); |
1975 | ||
1976 | aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, | |
5fe8c711 HJ |
1977 | CRYPTO_ALG_NEED_FALLBACK | |
1978 | CRYPTO_ALG_ASYNC); | |
0e93708d HJ |
1979 | if (IS_ERR(aeadctx->sw_cipher)) |
1980 | return PTR_ERR(aeadctx->sw_cipher); | |
1981 | crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx), | |
1982 | sizeof(struct aead_request) + | |
1983 | crypto_aead_reqsize(aeadctx->sw_cipher))); | |
2debd332 HJ |
1984 | aeadctx->null = crypto_get_default_null_skcipher(); |
1985 | if (IS_ERR(aeadctx->null)) | |
1986 | return PTR_ERR(aeadctx->null); | |
1987 | return chcr_device_init(ctx); | |
1988 | } | |
1989 | ||
1990 | static void chcr_aead_cra_exit(struct crypto_aead *tfm) | |
1991 | { | |
0e93708d HJ |
1992 | struct chcr_context *ctx = crypto_aead_ctx(tfm); |
1993 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
1994 | ||
2debd332 | 1995 | crypto_put_default_null_skcipher(); |
0e93708d | 1996 | crypto_free_aead(aeadctx->sw_cipher); |
2debd332 HJ |
1997 | } |
1998 | ||
1999 | static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, | |
2000 | unsigned int authsize) | |
2001 | { | |
2002 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | |
2003 | ||
2004 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; | |
2005 | aeadctx->mayverify = VERIFY_HW; | |
0e93708d | 2006 | return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); |
2debd332 HJ |
2007 | } |
2008 | static int chcr_authenc_setauthsize(struct crypto_aead *tfm, | |
2009 | unsigned int authsize) | |
2010 | { | |
2011 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | |
2012 | u32 maxauth = crypto_aead_maxauthsize(tfm); | |
2013 | ||
2014 | /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does |
2015 | * not hold for SHA1, so the authsize == 12 check must come before the |
2016 | * authsize == (maxauth >> 1) check. |
2017 | */ | |
2018 | if (authsize == ICV_4) { | |
2019 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; | |
2020 | aeadctx->mayverify = VERIFY_HW; | |
2021 | } else if (authsize == ICV_6) { | |
2022 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; | |
2023 | aeadctx->mayverify = VERIFY_HW; | |
2024 | } else if (authsize == ICV_10) { | |
2025 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; | |
2026 | aeadctx->mayverify = VERIFY_HW; | |
2027 | } else if (authsize == ICV_12) { | |
2028 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | |
2029 | aeadctx->mayverify = VERIFY_HW; | |
2030 | } else if (authsize == ICV_14) { | |
2031 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; | |
2032 | aeadctx->mayverify = VERIFY_HW; | |
2033 | } else if (authsize == (maxauth >> 1)) { | |
2034 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | |
2035 | aeadctx->mayverify = VERIFY_HW; | |
2036 | } else if (authsize == maxauth) { | |
2037 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
2038 | aeadctx->mayverify = VERIFY_HW; | |
2039 | } else { | |
2040 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
2041 | aeadctx->mayverify = VERIFY_SW; | |
2042 | } | |
0e93708d | 2043 | return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); |
2debd332 HJ |
2044 | } |
2045 | ||
2046 | ||
2047 | static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) | |
2048 | { | |
2049 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | |
2050 | ||
2051 | switch (authsize) { | |
2052 | case ICV_4: | |
2053 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; | |
2054 | aeadctx->mayverify = VERIFY_HW; | |
2055 | break; | |
2056 | case ICV_8: | |
2057 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | |
2058 | aeadctx->mayverify = VERIFY_HW; | |
2059 | break; | |
2060 | case ICV_12: | |
2061 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | |
2062 | aeadctx->mayverify = VERIFY_HW; | |
2063 | break; | |
2064 | case ICV_14: | |
2065 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; | |
2066 | aeadctx->mayverify = VERIFY_HW; | |
2067 | break; | |
2068 | case ICV_16: | |
2069 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
2070 | aeadctx->mayverify = VERIFY_HW; | |
2071 | break; | |
2072 | case ICV_13: | |
2073 | case ICV_15: | |
2074 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
2075 | aeadctx->mayverify = VERIFY_SW; | |
2076 | break; | |
2077 | default: | |
2078 | ||
2079 | crypto_tfm_set_flags((struct crypto_tfm *) tfm, | |
2080 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
2081 | return -EINVAL; | |
2082 | } | |
0e93708d | 2083 | return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); |
2debd332 HJ |
2084 | } |
2085 | ||
2086 | static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, | |
2087 | unsigned int authsize) | |
2088 | { | |
2089 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | |
2090 | ||
2091 | switch (authsize) { | |
2092 | case ICV_8: | |
2093 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | |
2094 | aeadctx->mayverify = VERIFY_HW; | |
2095 | break; | |
2096 | case ICV_12: | |
2097 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | |
2098 | aeadctx->mayverify = VERIFY_HW; | |
2099 | break; | |
2100 | case ICV_16: | |
2101 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
2102 | aeadctx->mayverify = VERIFY_HW; | |
2103 | break; | |
2104 | default: | |
2105 | crypto_tfm_set_flags((struct crypto_tfm *)tfm, | |
2106 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
2107 | return -EINVAL; | |
2108 | } | |
0e93708d | 2109 | return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); |
2debd332 HJ |
2110 | } |
2111 | ||
2112 | static int chcr_ccm_setauthsize(struct crypto_aead *tfm, | |
2113 | unsigned int authsize) | |
2114 | { | |
2115 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | |
2116 | ||
2117 | switch (authsize) { | |
2118 | case ICV_4: | |
2119 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; | |
2120 | aeadctx->mayverify = VERIFY_HW; | |
2121 | break; | |
2122 | case ICV_6: | |
2123 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; | |
2124 | aeadctx->mayverify = VERIFY_HW; | |
2125 | break; | |
2126 | case ICV_8: | |
2127 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | |
2128 | aeadctx->mayverify = VERIFY_HW; | |
2129 | break; | |
2130 | case ICV_10: | |
2131 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; | |
2132 | aeadctx->mayverify = VERIFY_HW; | |
2133 | break; | |
2134 | case ICV_12: | |
2135 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | |
2136 | aeadctx->mayverify = VERIFY_HW; | |
2137 | break; | |
2138 | case ICV_14: | |
2139 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; | |
2140 | aeadctx->mayverify = VERIFY_HW; | |
2141 | break; | |
2142 | case ICV_16: | |
2143 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | |
2144 | aeadctx->mayverify = VERIFY_HW; | |
2145 | break; | |
2146 | default: | |
2147 | crypto_tfm_set_flags((struct crypto_tfm *)tfm, | |
2148 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
2149 | return -EINVAL; | |
2150 | } | |
0e93708d | 2151 | return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); |
2debd332 HJ |
2152 | } |
2153 | ||
0e93708d | 2154 | static int chcr_ccm_common_setkey(struct crypto_aead *aead, |
2debd332 HJ |
2155 | const u8 *key, |
2156 | unsigned int keylen) | |
2157 | { | |
2158 | struct chcr_context *ctx = crypto_aead_ctx(aead); | |
2159 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
2160 | unsigned char ck_size, mk_size; | |
2161 | int key_ctx_size = 0; | |
2162 | ||
2debd332 HJ |
2163 | key_ctx_size = sizeof(struct _key_ctx) + |
2164 | ((DIV_ROUND_UP(keylen, 16)) << 4) * 2; | |
2165 | if (keylen == AES_KEYSIZE_128) { | |
2166 | mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
2167 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
2168 | } else if (keylen == AES_KEYSIZE_192) { | |
2169 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
2170 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; | |
2171 | } else if (keylen == AES_KEYSIZE_256) { | |
2172 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
2173 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; | |
2174 | } else { | |
2175 | crypto_tfm_set_flags((struct crypto_tfm *)aead, | |
2176 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
2177 | aeadctx->enckey_len = 0; | |
2178 | return -EINVAL; | |
2179 | } | |
2180 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0, | |
2181 | key_ctx_size >> 4); | |
0e93708d HJ |
2182 | memcpy(aeadctx->key, key, keylen); |
2183 | aeadctx->enckey_len = keylen; | |
2184 | ||
2debd332 HJ |
2185 | return 0; |
2186 | } | |
2187 | ||
0e93708d HJ |
2188 | static int chcr_aead_ccm_setkey(struct crypto_aead *aead, |
2189 | const u8 *key, | |
2190 | unsigned int keylen) | |
2191 | { | |
2192 | struct chcr_context *ctx = crypto_aead_ctx(aead); | |
2193 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
2194 | int error; | |
2195 | ||
2196 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | |
2197 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & | |
2198 | CRYPTO_TFM_REQ_MASK); | |
2199 | error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | |
2200 | crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); | |
2201 | crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & | |
2202 | CRYPTO_TFM_RES_MASK); | |
2203 | if (error) | |
2204 | return error; | |
2205 | return chcr_ccm_common_setkey(aead, key, keylen); | |
2206 | } | |
2207 | ||
2debd332 HJ |
2208 | static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, |
2209 | unsigned int keylen) | |
2210 | { | |
2211 | struct chcr_context *ctx = crypto_aead_ctx(aead); | |
4dbeae42 HJ |
2212 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); |
2213 | int error; | |
2debd332 HJ |
2214 | |
2215 | if (keylen < 3) { | |
2216 | crypto_tfm_set_flags((struct crypto_tfm *)aead, | |
2217 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
2218 | aeadctx->enckey_len = 0; | |
2219 | return -EINVAL; | |
2220 | } | |
4dbeae42 HJ |
2221 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); |
2222 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & | |
2223 | CRYPTO_TFM_REQ_MASK); | |
2224 | error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | |
2225 | crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); | |
2226 | crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & | |
2227 | CRYPTO_TFM_RES_MASK); | |
2228 | if (error) | |
2229 | return error; | |
2debd332 HJ |
2230 | keylen -= 3; |
2231 | memcpy(aeadctx->salt, key + keylen, 3); | |
0e93708d | 2232 | return chcr_ccm_common_setkey(aead, key, keylen); |
2debd332 HJ |
2233 | } |
2234 | ||
2235 | static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |
2236 | unsigned int keylen) | |
2237 | { | |
2238 | struct chcr_context *ctx = crypto_aead_ctx(aead); | |
2239 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
2240 | struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); | |
8356ea51 | 2241 | struct crypto_cipher *cipher; |
2debd332 HJ |
2242 | unsigned int ck_size; |
2243 | int ret = 0, key_ctx_size = 0; | |
2244 | ||
0e93708d HJ |
2245 | aeadctx->enckey_len = 0; |
2246 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | |
2247 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) | |
2248 | & CRYPTO_TFM_REQ_MASK); | |
2249 | ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | |
2250 | crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); | |
2251 | crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & | |
2252 | CRYPTO_TFM_RES_MASK); | |
2253 | if (ret) | |
2254 | goto out; | |
2255 | ||
7c2cf1c4 HJ |
2256 | if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 && |
2257 | keylen > 3) { | |
2debd332 HJ |
2258 | keylen -= 4; /* nonce/salt is present in the last 4 bytes */ |
2259 | memcpy(aeadctx->salt, key + keylen, 4); | |
2260 | } | |
2261 | if (keylen == AES_KEYSIZE_128) { | |
2262 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
2263 | } else if (keylen == AES_KEYSIZE_192) { | |
2264 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
2265 | } else if (keylen == AES_KEYSIZE_256) { | |
2266 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
2267 | } else { | |
2268 | crypto_tfm_set_flags((struct crypto_tfm *)aead, | |
2269 | CRYPTO_TFM_RES_BAD_KEY_LEN); | |
0e93708d | 2270 | pr_err("GCM: Invalid key length %d\n", keylen); |
2debd332 HJ |
2271 | ret = -EINVAL; |
2272 | goto out; | |
2273 | } | |
2274 | ||
2275 | memcpy(aeadctx->key, key, keylen); | |
2276 | aeadctx->enckey_len = keylen; | |
2277 | key_ctx_size = sizeof(struct _key_ctx) + | |
2278 | ((DIV_ROUND_UP(keylen, 16)) << 4) + | |
2279 | AEAD_H_SIZE; | |
2280 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, | |
2281 | CHCR_KEYCTX_MAC_KEY_SIZE_128, | |
2282 | 0, 0, | |
2283 | key_ctx_size >> 4); | |
8356ea51 HJ |
2284 | /* Calculate the H = CIPH(K, 0 repeated 16 times). |
2285 | * It will go in key context | |
2debd332 | 2286 | */ |
8356ea51 HJ |
2287 | cipher = crypto_alloc_cipher("aes-generic", 0, 0); |
2288 | if (IS_ERR(cipher)) { | |
2debd332 HJ |
2289 | aeadctx->enckey_len = 0; |
2290 | ret = -ENOMEM; | |
2291 | goto out; | |
2292 | } | |
8356ea51 HJ |
2293 | |
2294 | ret = crypto_cipher_setkey(cipher, key, keylen); | |
2debd332 HJ |
2295 | if (ret) { |
2296 | aeadctx->enckey_len = 0; | |
2297 | goto out1; | |
2298 | } | |
2299 | memset(gctx->ghash_h, 0, AEAD_H_SIZE); | |
8356ea51 | 2300 | crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h); |
2debd332 HJ |
2301 | |
2302 | out1: | |
8356ea51 | 2303 | crypto_free_cipher(cipher); |
2debd332 HJ |
2304 | out: |
2305 | return ret; | |
2306 | } | |
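/*
 * Note on the hash subkey computed above: H = AES_K(0^128) is derived with
 * a synchronous software cipher because the hardware is only given the raw
 * AES key plus H in the key context.  As a quick cross-check against the
 * NIST GCM vectors, an all-zero 128-bit key should yield
 * H = 66e94bd4ef8a2c3b884cfa59ca342b2e.
 */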
2307 | ||
2308 | static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | |
2309 | unsigned int keylen) | |
2310 | { | |
2311 | struct chcr_context *ctx = crypto_aead_ctx(authenc); | |
2312 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
2313 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); | |
2314 | /* the key contains both the auth and cipher keys */ |
2315 | struct crypto_authenc_keys keys; | |
2316 | unsigned int bs; | |
2317 | unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize; | |
2318 | int err = 0, i, key_ctx_len = 0; | |
2319 | unsigned char ck_size = 0; | |
2320 | unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 }; | |
ec1bca94 | 2321 | struct crypto_shash *base_hash = ERR_PTR(-EINVAL); |
2debd332 HJ |
2322 | struct algo_param param; |
2323 | int align; | |
2324 | u8 *o_ptr = NULL; | |
2325 | ||
0e93708d HJ |
2326 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); |
2327 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) | |
2328 | & CRYPTO_TFM_REQ_MASK); | |
2329 | err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | |
2330 | crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); | |
2331 | crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) | |
2332 | & CRYPTO_TFM_RES_MASK); | |
2333 | if (err) | |
2334 | goto out; | |
2335 | ||
2debd332 HJ |
2336 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { |
2337 | crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
2338 | goto out; | |
2339 | } | |
2340 | ||
2341 | if (get_alg_config(¶m, max_authsize)) { | |
2342 | pr_err("chcr : Unsupported digest size\n"); | |
2343 | goto out; | |
2344 | } | |
2345 | if (keys.enckeylen == AES_KEYSIZE_128) { | |
2346 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
2347 | } else if (keys.enckeylen == AES_KEYSIZE_192) { | |
2348 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
2349 | } else if (keys.enckeylen == AES_KEYSIZE_256) { | |
2350 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
2351 | } else { | |
2352 | pr_err("chcr : Unsupported cipher key\n"); | |
2353 | goto out; | |
2354 | } | |
2355 | ||
2356 | /* Copy only the encryption key. We use authkey to generate h(ipad) and |
2357 | * h(opad), so authkey is not needed again. authkeylen equals the hash |
2358 | * digest size. |
2359 | */ | |
2360 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); | |
2361 | aeadctx->enckey_len = keys.enckeylen; | |
2362 | get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, | |
2363 | aeadctx->enckey_len << 3); | |
2364 | ||
2365 | base_hash = chcr_alloc_shash(max_authsize); | |
2366 | if (IS_ERR(base_hash)) { | |
2367 | pr_err("chcr : Base driver cannot be loaded\n"); | |
0e93708d HJ |
2368 | aeadctx->enckey_len = 0; |
2369 | return -EINVAL; | |
324429d7 | 2370 | } |
2debd332 HJ |
2371 | { |
2372 | SHASH_DESC_ON_STACK(shash, base_hash); | |
2373 | shash->tfm = base_hash; | |
2374 | shash->flags = crypto_shash_get_flags(base_hash); | |
2375 | bs = crypto_shash_blocksize(base_hash); | |
2376 | align = KEYCTX_ALIGN_PAD(max_authsize); | |
2377 | o_ptr = actx->h_iopad + param.result_size + align; | |
2378 | ||
2379 | if (keys.authkeylen > bs) { | |
2380 | err = crypto_shash_digest(shash, keys.authkey, | |
2381 | keys.authkeylen, | |
2382 | o_ptr); | |
2383 | if (err) { | |
2384 | pr_err("chcr : Base driver cannot be loaded\n"); | |
2385 | goto out; | |
2386 | } | |
2387 | keys.authkeylen = max_authsize; | |
2388 | } else | |
2389 | memcpy(o_ptr, keys.authkey, keys.authkeylen); | |
2390 | ||
2391 | /* Compute the ipad-digest */ |
2392 | memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); | |
2393 | memcpy(pad, o_ptr, keys.authkeylen); | |
2394 | for (i = 0; i < bs >> 2; i++) | |
2395 | *((unsigned int *)pad + i) ^= IPAD_DATA; | |
2396 | ||
2397 | if (chcr_compute_partial_hash(shash, pad, actx->h_iopad, | |
2398 | max_authsize)) | |
2399 | goto out; | |
2400 | /* Compute the opad-digest */ | |
2401 | memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); | |
2402 | memcpy(pad, o_ptr, keys.authkeylen); | |
2403 | for (i = 0; i < bs >> 2; i++) | |
2404 | *((unsigned int *)pad + i) ^= OPAD_DATA; | |
2405 | ||
2406 | if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize)) | |
2407 | goto out; | |
2408 | ||
2409 | /* convert the ipad and opad digest to network order */ | |
2410 | chcr_change_order(actx->h_iopad, param.result_size); | |
2411 | chcr_change_order(o_ptr, param.result_size); | |
2412 | key_ctx_len = sizeof(struct _key_ctx) + | |
2413 | ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) + | |
2414 | (param.result_size + align) * 2; | |
2415 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, | |
2416 | 0, 1, key_ctx_len >> 4); | |
2417 | actx->auth_mode = param.auth_mode; | |
2418 | chcr_free_shash(base_hash); | |
2419 | ||
2420 | return 0; | |
2421 | } | |
2422 | out: | |
2423 | aeadctx->enckey_len = 0; | |
ec1bca94 | 2424 | if (!IS_ERR(base_hash)) |
2debd332 HJ |
2425 | chcr_free_shash(base_hash); |
2426 | return -EINVAL; | |
324429d7 HS |
2427 | } |
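/*
 * Note on the HMAC precomputation above: the auth key (hashed first if it
 * is longer than the block size) is padded to the block size and XORed
 * word-by-word with IPAD_DATA/OPAD_DATA (the 0x36 and 0x5c HMAC pad bytes
 * replicated across a 32-bit word); a partial hash of each padded block is
 * stored in h_iopad so the hardware only has to continue the inner and
 * outer hashes per request.
 */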
2428 | ||
2debd332 HJ |
2429 | static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, |
2430 | const u8 *key, unsigned int keylen) | |
2431 | { | |
2432 | struct chcr_context *ctx = crypto_aead_ctx(authenc); | |
2433 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | |
2434 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); | |
2435 | struct crypto_authenc_keys keys; | |
0e93708d | 2436 | int err; |
2debd332 HJ |
2437 | /* the key contains both the auth and cipher keys */ |
2438 | int key_ctx_len = 0; | |
2439 | unsigned char ck_size = 0; | |
2440 | ||
0e93708d HJ |
2441 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); |
2442 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) | |
2443 | & CRYPTO_TFM_REQ_MASK); | |
2444 | err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | |
2445 | crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); | |
2446 | crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) | |
2447 | & CRYPTO_TFM_RES_MASK); | |
2448 | if (err) | |
2449 | goto out; | |
2450 | ||
2debd332 HJ |
2451 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { |
2452 | crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
2453 | goto out; | |
2454 | } | |
2455 | if (keys.enckeylen == AES_KEYSIZE_128) { | |
2456 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
2457 | } else if (keys.enckeylen == AES_KEYSIZE_192) { | |
2458 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
2459 | } else if (keys.enckeylen == AES_KEYSIZE_256) { | |
2460 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
2461 | } else { | |
2462 | pr_err("chcr : Unsupported cipher key\n"); | |
2463 | goto out; | |
2464 | } | |
2465 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); | |
2466 | aeadctx->enckey_len = keys.enckeylen; | |
2467 | get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, | |
2468 | aeadctx->enckey_len << 3); | |
2469 | key_ctx_len = sizeof(struct _key_ctx) | |
2470 | + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4); | |
2471 | ||
2472 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0, | |
2473 | 0, key_ctx_len >> 4); | |
2474 | actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP; | |
2475 | return 0; | |
2476 | out: | |
2477 | aeadctx->enckey_len = 0; | |
2478 | return -EINVAL; | |
2479 | } | |
2480 | static int chcr_aead_encrypt(struct aead_request *req) | |
2481 | { | |
2482 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2483 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2484 | ||
2485 | reqctx->verify = VERIFY_HW; | |
2486 | ||
2487 | switch (get_aead_subtype(tfm)) { | |
2488 | case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC: | |
2489 | case CRYPTO_ALG_SUB_TYPE_AEAD_NULL: | |
2490 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, | |
2491 | create_authenc_wr); | |
2492 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: | |
2493 | case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: | |
2494 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, | |
2495 | create_aead_ccm_wr); | |
2496 | default: | |
2497 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, | |
2498 | create_gcm_wr); | |
2499 | } | |
2500 | } | |
2501 | ||
2502 | static int chcr_aead_decrypt(struct aead_request *req) | |
2503 | { | |
2504 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2505 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | |
2506 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2507 | int size; | |
2508 | ||
2509 | if (aeadctx->mayverify == VERIFY_SW) { | |
2510 | size = crypto_aead_maxauthsize(tfm); | |
2511 | reqctx->verify = VERIFY_SW; | |
2512 | } else { | |
2513 | size = 0; | |
2514 | reqctx->verify = VERIFY_HW; | |
2515 | } | |
2516 | ||
2517 | switch (get_aead_subtype(tfm)) { | |
2518 | case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC: | |
2519 | case CRYPTO_ALG_SUB_TYPE_AEAD_NULL: | |
2520 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, | |
2521 | create_authenc_wr); | |
2522 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: | |
2523 | case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: | |
2524 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, | |
2525 | create_aead_ccm_wr); | |
2526 | default: | |
2527 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, | |
2528 | create_gcm_wr); | |
2529 | } | |
2530 | } | |
2531 | ||
2532 | static int chcr_aead_op(struct aead_request *req, | |
2533 | unsigned short op_type, | |
2534 | int size, | |
2535 | create_wr_t create_wr_fn) | |
2536 | { | |
2537 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2538 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | |
5ba042c0 | 2539 | struct uld_ctx *u_ctx; |
2debd332 HJ |
2540 | struct sk_buff *skb; |
2541 | ||
5ba042c0 | 2542 | if (!ctx->dev) { |
2debd332 HJ |
2543 | pr_err("chcr : %s : No crypto device.\n", __func__); |
2544 | return -ENXIO; | |
2545 | } | |
5ba042c0 | 2546 | u_ctx = ULD_CTX(ctx); |
2debd332 | 2547 | if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
72a56ca9 | 2548 | ctx->tx_qidx)) { |
2debd332 HJ |
2549 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
2550 | return -EBUSY; | |
2551 | } | |
2552 | ||
2553 | /* Form a WR from req */ | |
72a56ca9 | 2554 | skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size, |
2debd332 HJ |
2555 | op_type); |
2556 | ||
0e93708d | 2557 | if (IS_ERR(skb) || !skb) |
2debd332 | 2558 | return PTR_ERR(skb); |
2debd332 HJ |
2559 | |
2560 | skb->dev = u_ctx->lldi.ports[0]; | |
72a56ca9 | 2561 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); |
2debd332 HJ |
2562 | chcr_send_wr(skb); |
2563 | return -EINPROGRESS; | |
2564 | } | |
324429d7 HS |
2565 | static struct chcr_alg_template driver_algs[] = { |
2566 | /* AES-CBC */ | |
2567 | { | |
2568 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | |
2569 | .is_registered = 0, | |
2570 | .alg.crypto = { | |
2571 | .cra_name = "cbc(aes)", | |
2debd332 | 2572 | .cra_driver_name = "cbc-aes-chcr", |
324429d7 | 2573 | .cra_priority = CHCR_CRA_PRIORITY, |
44e9f799 | 2574 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
324429d7 HS |
2575 | CRYPTO_ALG_ASYNC, |
2576 | .cra_blocksize = AES_BLOCK_SIZE, | |
2577 | .cra_ctxsize = sizeof(struct chcr_context) | |
2578 | + sizeof(struct ablk_ctx), | |
2579 | .cra_alignmask = 0, | |
2580 | .cra_type = &crypto_ablkcipher_type, | |
2581 | .cra_module = THIS_MODULE, | |
2582 | .cra_init = chcr_cra_init, | |
2583 | .cra_exit = NULL, | |
2584 | .cra_u.ablkcipher = { | |
2585 | .min_keysize = AES_MIN_KEY_SIZE, | |
2586 | .max_keysize = AES_MAX_KEY_SIZE, | |
2587 | .ivsize = AES_BLOCK_SIZE, | |
2588 | .setkey = chcr_aes_cbc_setkey, | |
2589 | .encrypt = chcr_aes_encrypt, | |
2590 | .decrypt = chcr_aes_decrypt, | |
2591 | } | |
2592 | } | |
2593 | }, | |
2594 | { | |
2595 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | |
2596 | .is_registered = 0, | |
2597 | .alg.crypto = { | |
2598 | .cra_name = "xts(aes)", | |
2debd332 | 2599 | .cra_driver_name = "xts-aes-chcr", |
324429d7 | 2600 | .cra_priority = CHCR_CRA_PRIORITY, |
44e9f799 | 2601 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
324429d7 HS |
2602 | CRYPTO_ALG_ASYNC, |
2603 | .cra_blocksize = AES_BLOCK_SIZE, | |
2604 | .cra_ctxsize = sizeof(struct chcr_context) + | |
2605 | sizeof(struct ablk_ctx), | |
2606 | .cra_alignmask = 0, | |
2607 | .cra_type = &crypto_ablkcipher_type, | |
2608 | .cra_module = THIS_MODULE, | |
2609 | .cra_init = chcr_cra_init, | |
2610 | .cra_exit = NULL, | |
2611 | .cra_u = { | |
2612 | .ablkcipher = { | |
2613 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | |
2614 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | |
2615 | .ivsize = AES_BLOCK_SIZE, | |
2616 | .setkey = chcr_aes_xts_setkey, | |
2617 | .encrypt = chcr_aes_encrypt, | |
2618 | .decrypt = chcr_aes_decrypt, | |
2619 | } | |
2620 | } | |
2621 | } | |
2622 | }, | |
2623 | /* SHA */ | |
2624 | { | |
2625 | .type = CRYPTO_ALG_TYPE_AHASH, | |
2626 | .is_registered = 0, | |
2627 | .alg.hash = { | |
2628 | .halg.digestsize = SHA1_DIGEST_SIZE, | |
2629 | .halg.base = { | |
2630 | .cra_name = "sha1", | |
2631 | .cra_driver_name = "sha1-chcr", | |
2632 | .cra_blocksize = SHA1_BLOCK_SIZE, | |
2633 | } | |
2634 | } | |
2635 | }, | |
2636 | { | |
2637 | .type = CRYPTO_ALG_TYPE_AHASH, | |
2638 | .is_registered = 0, | |
2639 | .alg.hash = { | |
2640 | .halg.digestsize = SHA256_DIGEST_SIZE, | |
2641 | .halg.base = { | |
2642 | .cra_name = "sha256", | |
2643 | .cra_driver_name = "sha256-chcr", | |
2644 | .cra_blocksize = SHA256_BLOCK_SIZE, | |
2645 | } | |
2646 | } | |
2647 | }, | |
2648 | { | |
2649 | .type = CRYPTO_ALG_TYPE_AHASH, | |
2650 | .is_registered = 0, | |
2651 | .alg.hash = { | |
2652 | .halg.digestsize = SHA224_DIGEST_SIZE, | |
2653 | .halg.base = { | |
2654 | .cra_name = "sha224", | |
2655 | .cra_driver_name = "sha224-chcr", | |
2656 | .cra_blocksize = SHA224_BLOCK_SIZE, | |
2657 | } | |
2658 | } | |
2659 | }, | |
2660 | { | |
2661 | .type = CRYPTO_ALG_TYPE_AHASH, | |
2662 | .is_registered = 0, | |
2663 | .alg.hash = { | |
2664 | .halg.digestsize = SHA384_DIGEST_SIZE, | |
2665 | .halg.base = { | |
2666 | .cra_name = "sha384", | |
2667 | .cra_driver_name = "sha384-chcr", | |
2668 | .cra_blocksize = SHA384_BLOCK_SIZE, | |
2669 | } | |
2670 | } | |
2671 | }, | |
2672 | { | |
2673 | .type = CRYPTO_ALG_TYPE_AHASH, | |
2674 | .is_registered = 0, | |
2675 | .alg.hash = { | |
2676 | .halg.digestsize = SHA512_DIGEST_SIZE, | |
2677 | .halg.base = { | |
2678 | .cra_name = "sha512", | |
2679 | .cra_driver_name = "sha512-chcr", | |
2680 | .cra_blocksize = SHA512_BLOCK_SIZE, | |
2681 | } | |
2682 | } | |
2683 | }, | |
2684 | /* HMAC */ | |
2685 | { | |
2686 | .type = CRYPTO_ALG_TYPE_HMAC, | |
2687 | .is_registered = 0, | |
2688 | .alg.hash = { | |
2689 | .halg.digestsize = SHA1_DIGEST_SIZE, | |
2690 | .halg.base = { | |
2691 | .cra_name = "hmac(sha1)", | |
2debd332 | 2692 | .cra_driver_name = "hmac-sha1-chcr", |
324429d7 HS |
2693 | .cra_blocksize = SHA1_BLOCK_SIZE, |
2694 | } | |
2695 | } | |
2696 | }, | |
2697 | { | |
2698 | .type = CRYPTO_ALG_TYPE_HMAC, | |
2699 | .is_registered = 0, | |
2700 | .alg.hash = { | |
2701 | .halg.digestsize = SHA224_DIGEST_SIZE, | |
2702 | .halg.base = { | |
2703 | .cra_name = "hmac(sha224)", | |
2debd332 | 2704 | .cra_driver_name = "hmac-sha224-chcr", |
324429d7 HS |
2705 | .cra_blocksize = SHA224_BLOCK_SIZE, |
2706 | } | |
2707 | } | |
2708 | }, | |
2709 | { | |
2710 | .type = CRYPTO_ALG_TYPE_HMAC, | |
2711 | .is_registered = 0, | |
2712 | .alg.hash = { | |
2713 | .halg.digestsize = SHA256_DIGEST_SIZE, | |
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = 12,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
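
/*
 * Usage sketch (illustrative only, not part of the driver logic): once the
 * entries above are registered, kernel consumers reach them through the
 * generic crypto API by cra_name, and cra_priority decides whether the chcr
 * implementation is preferred over software providers. Assuming a T6 device
 * is up and "gcm-aes-chcr" has been registered, a caller might do roughly:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setkey(tfm, key, keylen);
 *		crypto_aead_setauthsize(tfm, 16);
 *		... build an aead_request and call crypto_aead_encrypt() ...
 *		crypto_free_aead(tfm);
 *	}
 *
 * Here key and keylen are placeholders supplied by the caller.
 */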

/*
 * chcr_unregister_alg - Deregister crypto algorithms with the
 * kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
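
/*
 * Note: the loop above clears is_registered unconditionally, so
 * chcr_unregister_alg() is safe to use both for normal teardown and to
 * unwind a partially completed chcr_register_alg().
 */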

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)

/*
 * chcr_register_alg - Register crypto algorithms with the kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;
			a_hash->halg.base.cra_type = &crypto_ahash_type;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
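
/*
 * Usage sketch (illustrative only): the ahash entries registered above are
 * driven through the standard asynchronous hash API. Assuming the chcr
 * implementation wins the priority selection for "sha256", a consumer would
 * do roughly the following (sgl, digest, nbytes and done_cb are placeholder
 * names for the caller's scatterlist, result buffer, length and completion
 * callback):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req;
 *
 *	if (!IS_ERR(tfm)) {
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		ahash_request_set_callback(req, 0, done_cb, NULL);
 *		ahash_request_set_crypt(req, sgl, digest, nbytes);
 *		crypto_ahash_digest(req);	-- may return -EINPROGRESS
 *		... wait for done_cb on async completion, then ...
 *		ahash_request_free(req);
 *		crypto_free_ahash(tfm);
 *	}
 */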

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once when the first device comes up. After this
 * the kernel will start calling the driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once when the last device goes down. After this
 * the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
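
/*
 * Lifecycle sketch (illustrative; the real call sites live in the chcr core
 * code, not in this file): the intent documented above is that registration
 * happens exactly once for the first adapter and teardown once for the last,
 * e.g. something along the lines of
 *
 *	if (atomic_inc_return(&dev_count) == 1)
 *		start_crypto();
 *	...
 *	if (atomic_dec_and_test(&dev_count))
 *		stop_crypto();
 *
 * where dev_count stands in for however the core tracks the number of live
 * devices.
 */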