/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

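/*
 * Driver-side glue for AES skciphers on the Inside Secure EIP197:
 * requests are queued per ring and translated into command/result
 * descriptor chains, while the cipher state (key, mode, direction)
 * lives in a per-tfm context record in engine memory.
 */
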
enum safexcel_cipher_direction {
	SAFEXCEL_ENCRYPT,
	SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	enum safexcel_cipher_direction direction;
	u32 mode;

	__le32 key[8];
	unsigned int key_len;
};

struct safexcel_cipher_req {
	bool needs_inv;
};

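/*
 * Build the EIP197 instruction token for a cipher request. For CBC the
 * IV is first copied into the token words and the engine is told to
 * fetch it from there; a single DIRECTION instruction then covers the
 * whole payload. (The TYPE_CRYTO spelling below follows the macro as
 * defined in safexcel.h.)
 */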
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
				  struct crypto_async_request *async,
				  struct safexcel_command_desc *cdesc,
				  u32 length)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_token *token;
	unsigned int offset = 0;

	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
		offset = AES_BLOCK_SIZE / sizeof(u32);
		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
	}

	token = (struct safexcel_token *)(cdesc->control_data.token + offset);

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
	token[0].instructions = EIP197_TOKEN_INS_LAST |
				EIP197_TOKEN_INS_TYPE_CRYTO |
				EIP197_TOKEN_INS_TYPE_OUTPUT;
}

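/*
 * Expand and validate the AES key. If the new key differs from the one
 * cached in the context, flag the context for invalidation so the
 * engine's copy is refreshed before the next request uses it.
 */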
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
			       unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aes_ctx aes;
	int ret, i;

	ret = crypto_aes_expand_key(&aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	for (i = 0; i < len / sizeof(u32); i++) {
		if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
			ctx->base.needs_inv = true;
			break;
		}
	}

	for (i = 0; i < len / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = len;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

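/*
 * Fill in the context-control words of the first command descriptor:
 * crypto direction, cipher mode, and the AES variant plus the context
 * size (in 32-bit words) derived from the key length.
 */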
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
				    struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ctrl_size;

	if (ctx->direction == SAFEXCEL_ENCRYPT)
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
	else
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
	cdesc->control_data.control1 |= ctx->mode;

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
		ctrl_size = 4;
		break;
	case AES_KEYSIZE_192:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
		ctrl_size = 6;
		break;
	case AES_KEYSIZE_256:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
		ctrl_size = 8;
		break;
	default:
		dev_err(priv->dev, "aes keysize not supported: %u\n",
			ctx->key_len);
		return -EINVAL;
	}
	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

	return 0;
}

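/*
 * Reap the result descriptors of a completed cipher request, report
 * any engine-side error, and unmap the request's scatterlists.
 */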
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: result: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev,
				"cipher: result: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	*should_complete = true;

	return ndesc;
}

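/*
 * Map the request's scatterlists and emit one command descriptor per
 * source segment and one result descriptor per destination segment.
 * The first command descriptor additionally carries the context
 * control words and the instruction token. On a ring-full condition
 * all descriptors written so far are rolled back and the buffers
 * unmapped.
 */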
static int safexcel_aes_send(struct crypto_async_request *async,
			     int ring, struct safexcel_request *request,
			     int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
	int i, ret = 0;

	if (req->src == req->dst) {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_BIDIRECTIONAL);
		nr_dst = nr_src;
		if (!nr_src)
			return -EINVAL;
	} else {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_TO_DEVICE);
		if (!nr_src)
			return -EINVAL;

		nr_dst = dma_map_sg(priv->dev, req->dst,
				    sg_nents_for_len(req->dst, req->cryptlen),
				    DMA_FROM_DEVICE);
		if (!nr_dst) {
			dma_unmap_sg(priv->dev, req->src,
				     sg_nents_for_len(req->src, req->cryptlen),
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* command descriptors */
	for_each_sg(req->src, sg, nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
					   sg_dma_address(sg), len, req->cryptlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1) {
			safexcel_context_control(ctx, cdesc);
			safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
		}

		queued -= len;
		if (!queued)
			break;
	}

	/* result descriptors */
	for_each_sg(req->dst, sg, nr_dst, i) {
		bool first = !i, last = (i == nr_dst - 1);
		u32 len = sg_dma_len(sg);

		rdesc = safexcel_add_rdesc(priv, ring, first, last,
					   sg_dma_address(sg), len);
		if (IS_ERR(rdesc)) {
			/* No space left in the result descriptor ring */
			ret = PTR_ERR(rdesc);
			goto rdesc_rollback;
		}
		n_rdesc++;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	request->req = &req->base;

	*commands = n_cdesc;
	*results = n_rdesc;
	return 0;

rdesc_rollback:
	for (i = 0; i < n_rdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	return ret;
}

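/*
 * Reap the result descriptor of a context-invalidation request. If the
 * tfm is being torn down, free the context record; otherwise move the
 * context to a freshly selected ring and requeue the request that
 * triggered the invalidation.
 */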
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0, enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: invalidate: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;

		return ndesc;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	*should_complete = false;

	return ndesc;
}

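/*
 * Result dispatcher: route a completion to the invalidation or the
 * regular result handler, depending on what was sent.
 */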
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int err;

	if (sreq->needs_inv) {
		sreq->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}

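/*
 * Queue a single command/result descriptor pair that invalidates this
 * tfm's context record in the engine.
 */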
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
				    int ring, struct safexcel_request *request,
				    int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	ret = safexcel_invalidate_cache(async, &ctx->base, priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}

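/*
 * Send dispatcher: emit either an invalidation request or the actual
 * AES request, as decided when the request was queued.
 */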
static int safexcel_send(struct crypto_async_request *async,
			 int ring, struct safexcel_request *request,
			 int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int ret;

	if (sreq->needs_inv)
		ret = safexcel_cipher_send_inv(async, ring, request,
					       commands, results);
	else
		ret = safexcel_aes_send(async, ring, request,
					commands, results);
	return ret;
}

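/*
 * Synchronously invalidate the context record on tfm teardown: build
 * an on-stack invalidation request, queue it on the context's ring and
 * wait for the completion callback.
 */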
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct skcipher_request));

	/* create invalidation request */
	init_completion(&result.completion);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      safexcel_inv_complete, &result);

	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	sreq->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	wait_for_completion_interruptible(&result.completion);

	if (result.error) {
		dev_warn(priv->dev,
			 "cipher: sync: invalidate: completion error %d\n",
			 result.error);
		return result.error;
	}

	return 0;
}

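/*
 * Common entry point for encrypt/decrypt: record direction and mode,
 * allocate the context record on first use (or flag an invalidation if
 * the key changed since the last request), then queue the request on
 * the context's ring.
 */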
static int safexcel_aes(struct skcipher_request *req,
			enum safexcel_cipher_direction dir, u32 mode)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	sreq->needs_inv = false;
	ctx->direction = dir;
	ctx->mode = mode;

	if (ctx->base.ctxr) {
		if (ctx->base.needs_inv) {
			sreq->needs_inv = true;
			ctx->base.needs_inv = false;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(req->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	return ret;
}

static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

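/*
 * tfm init: wire up this file's send/result handlers and reserve the
 * per-request context.
 */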
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(tfm->__crt_alg, struct safexcel_alg_template,
			     alg.skcipher.base);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct safexcel_cipher_req));

	return 0;
}

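/*
 * On teardown, scrub the cached key and the context record, then ask
 * the engine to invalidate its copy of the context.
 */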
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	memzero_explicit(ctx->key, 8 * sizeof(u32));

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

	ret = safexcel_cipher_exit_inv(tfm);
	if (ret)
		dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
}

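/* ecb(aes) template, exported for registration by the safexcel core. */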
struct safexcel_alg_template safexcel_alg_ecb_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_ecb_aes_encrypt,
		.decrypt = safexcel_ecb_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "safexcel-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

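/* cbc(aes) template; unlike ECB it carries an ivsize of one AES block. */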
struct safexcel_alg_template safexcel_alg_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_cbc_aes_encrypt,
		.decrypt = safexcel_cbc_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "safexcel-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
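
/*
 * Illustrative usage sketch (not driver code): with these templates
 * registered, a kernel user reaches the engine through the generic
 * skcipher API, roughly:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *	ret = crypto_skcipher_encrypt(req);
 *
 * Since the algorithms are CRYPTO_ALG_ASYNC, encrypt/decrypt normally
 * return -EINPROGRESS and finish through the request callback.
 */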