git.proxmox.com Git - mirror_ubuntu-disco-kernel.git/blob - drivers/crypto/inside-secure/safexcel_cipher.c
crypto: inside-secure - authenc(hmac(sha224), cbc(aes)) support
1 /*
2 * Copyright (C) 2017 Marvell
3 *
4 * Antoine Tenart <antoine.tenart@free-electrons.com>
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11 #include <linux/device.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14
15 #include <crypto/aead.h>
16 #include <crypto/aes.h>
17 #include <crypto/authenc.h>
18 #include <crypto/sha.h>
19 #include <crypto/skcipher.h>
20 #include <crypto/internal/aead.h>
21 #include <crypto/internal/skcipher.h>
22
23 #include "safexcel.h"
24
25 enum safexcel_cipher_direction {
26 SAFEXCEL_ENCRYPT,
27 SAFEXCEL_DECRYPT,
28 };
29
30 struct safexcel_cipher_ctx {
31 struct safexcel_context base;
32 struct safexcel_crypto_priv *priv;
33
34 u32 mode;
35 bool aead;
36
37 __le32 key[8];
38 unsigned int key_len;
39
40 	/* Everything below is AEAD-specific */
41 u32 alg;
42 u32 state_sz;
43 u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
44 u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
45 };
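/*
 * Illustrative note: these fields mirror the per-TFM context record the
 * engine reads over DMA. safexcel_aes_send() lays that record out as the
 * raw AES key (key_len bytes) followed, for AEAD transforms, by the
 * precomputed HMAC inner digest (ipad) and outer digest (opad), each
 * state_sz bytes.
 */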
46
47 struct safexcel_cipher_req {
48 enum safexcel_cipher_direction direction;
49 bool needs_inv;
50 };
51
52 static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
53 struct safexcel_command_desc *cdesc,
54 u32 length)
55 {
56 struct safexcel_token *token;
57 unsigned offset = 0;
58
59 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
60 offset = AES_BLOCK_SIZE / sizeof(u32);
61 memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
62
63 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
64 }
65
66 token = (struct safexcel_token *)(cdesc->control_data.token + offset);
67
68 token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
69 token[0].packet_length = length;
70 token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
71 EIP197_TOKEN_STAT_LAST_HASH;
72 token[0].instructions = EIP197_TOKEN_INS_LAST |
73 EIP197_TOKEN_INS_TYPE_CRYTO |
74 EIP197_TOKEN_INS_TYPE_OUTPUT;
75 }
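/*
 * Illustrative note: a plain skcipher needs only this single DIRECTION
 * token, which runs "length" bytes through the crypto unit and writes the
 * result to the output; in CBC mode the IV occupies the first four token
 * words of the command descriptor, loaded just above.
 */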
76
77 static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
78 struct safexcel_command_desc *cdesc,
79 enum safexcel_cipher_direction direction,
80 u32 cryptlen, u32 assoclen, u32 digestsize)
81 {
82 struct safexcel_token *token;
83 unsigned offset = 0;
84
85 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
86 offset = AES_BLOCK_SIZE / sizeof(u32);
87 memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
88
89 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
90 }
91
92 token = (struct safexcel_token *)(cdesc->control_data.token + offset);
93
94 if (direction == SAFEXCEL_DECRYPT)
95 cryptlen -= digestsize;
96
97 token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
98 token[0].packet_length = assoclen;
99 token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH |
100 EIP197_TOKEN_INS_TYPE_OUTPUT;
101
102 token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
103 token[1].packet_length = cryptlen;
104 token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
105 token[1].instructions = EIP197_TOKEN_INS_LAST |
106 EIP197_TOKEN_INS_TYPE_CRYTO |
107 EIP197_TOKEN_INS_TYPE_HASH |
108 EIP197_TOKEN_INS_TYPE_OUTPUT;
109
110 if (direction == SAFEXCEL_ENCRYPT) {
111 token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
112 token[2].packet_length = digestsize;
113 token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
114 EIP197_TOKEN_STAT_LAST_PACKET;
115 token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
116 EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
117 } else {
118 token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
119 token[2].packet_length = digestsize;
120 token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
121 EIP197_TOKEN_STAT_LAST_PACKET;
122 token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
123
124 token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY;
125 token[3].packet_length = digestsize |
126 EIP197_TOKEN_HASH_RESULT_VERIFY;
127 token[3].stat = EIP197_TOKEN_STAT_LAST_HASH |
128 EIP197_TOKEN_STAT_LAST_PACKET;
129 token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
130 }
131 }
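/*
 * Illustrative note: the AEAD token program built above is, in order:
 * token[0] hashes the associated data only, token[1] encrypts/decrypts
 * and hashes the payload, token[2] inserts the computed digest after the
 * ciphertext on encryption or retrieves it from the input on decryption,
 * and token[3] (decryption only) verifies the retrieved digest, which is
 * why cryptlen is reduced by digestsize in that direction.
 */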
132
133 static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
134 const u8 *key, unsigned int len)
135 {
136 struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
137 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
138 struct safexcel_crypto_priv *priv = ctx->priv;
139 struct crypto_aes_ctx aes;
140 int ret, i;
141
142 ret = crypto_aes_expand_key(&aes, key, len);
143 if (ret) {
144 crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
145 return ret;
146 }
147
148 if (priv->version == EIP197 && ctx->base.ctxr_dma) {
149 for (i = 0; i < len / sizeof(u32); i++) {
150 if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
151 ctx->base.needs_inv = true;
152 break;
153 }
154 }
155 }
156
157 for (i = 0; i < len / sizeof(u32); i++)
158 ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
159
160 ctx->key_len = len;
161
162 memzero_explicit(&aes, sizeof(aes));
163 return 0;
164 }
165
166 static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
167 unsigned int len)
168 {
169 struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
170 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
171 struct safexcel_ahash_export_state istate, ostate;
172 struct safexcel_crypto_priv *priv = ctx->priv;
173 struct crypto_authenc_keys keys;
174
175 if (crypto_authenc_extractkeys(&keys, key, len) != 0)
176 goto badkey;
177
178 if (keys.enckeylen > sizeof(ctx->key))
179 goto badkey;
180
181 /* Encryption key */
182 if (priv->version == EIP197 && ctx->base.ctxr_dma &&
183 memcmp(ctx->key, keys.enckey, keys.enckeylen))
184 ctx->base.needs_inv = true;
185
186 /* Auth key */
187 switch (ctx->alg) {
188 case CONTEXT_CONTROL_CRYPTO_ALG_SHA224:
189 if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey,
190 keys.authkeylen, &istate, &ostate))
191 goto badkey;
192 break;
193 case CONTEXT_CONTROL_CRYPTO_ALG_SHA256:
194 if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey,
195 keys.authkeylen, &istate, &ostate))
196 goto badkey;
197 break;
198 default:
199 dev_err(priv->dev, "aead: unsupported hash algorithm\n");
200 goto badkey;
201 }
202
203 crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
204 CRYPTO_TFM_RES_MASK);
205
206 if (priv->version == EIP197 && ctx->base.ctxr_dma &&
207 (memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
208 memcmp(ctx->opad, ostate.state, ctx->state_sz)))
209 ctx->base.needs_inv = true;
210
211 /* Now copy the keys into the context */
212 memcpy(ctx->key, keys.enckey, keys.enckeylen);
213 ctx->key_len = keys.enckeylen;
214
215 memcpy(ctx->ipad, &istate.state, ctx->state_sz);
216 memcpy(ctx->opad, &ostate.state, ctx->state_sz);
217
218 memzero_explicit(&keys, sizeof(keys));
219 return 0;
220
221 badkey:
222 crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
223 memzero_explicit(&keys, sizeof(keys));
224 return -EINVAL;
225 }
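/*
 * Illustrative note: the key blob handed to this setkey uses the generic
 * authenc layout parsed by crypto_authenc_extractkeys(), i.e. an rtattr
 * announcing the encryption key length, then the authentication key, then
 * the AES key. As in the skcipher setkey above, changing keys on an
 * EIP197 with a live context record only sets base.needs_inv; the cache
 * invalidation itself is issued from the next request.
 */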
226
227 static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
228 struct crypto_async_request *async,
229 struct safexcel_cipher_req *sreq,
230 struct safexcel_command_desc *cdesc)
231 {
232 struct safexcel_crypto_priv *priv = ctx->priv;
233 int ctrl_size;
234
235 if (ctx->aead) {
236 if (sreq->direction == SAFEXCEL_ENCRYPT)
237 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
238 else
239 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
240 } else {
241 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
242
243 /* The decryption control type is a combination of the
244 * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all
245 * types.
246 */
247 if (sreq->direction == SAFEXCEL_DECRYPT)
248 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN;
249 }
250
251 cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
252 cdesc->control_data.control1 |= ctx->mode;
253
254 if (ctx->aead)
255 cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
256 ctx->alg;
257
258 switch (ctx->key_len) {
259 case AES_KEYSIZE_128:
260 cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
261 break;
262 case AES_KEYSIZE_192:
263 cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
264 break;
265 case AES_KEYSIZE_256:
266 cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
267 break;
268 default:
269 dev_err(priv->dev, "aes keysize not supported: %u\n",
270 ctx->key_len);
271 return -EINVAL;
272 }
273
274 ctrl_size = ctx->key_len / sizeof(u32);
275 if (ctx->aead)
276 		/* Take into account the ipad+opad digests */
277 ctrl_size += ctx->state_sz / sizeof(u32) * 2;
278 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);
279
280 return 0;
281 }
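/*
 * Illustrative note: CONTEXT_CONTROL_SIZE() counts 32-bit words, so the
 * control size is the AES key in words plus, for AEAD, two hash states
 * (ipad and opad) of state_sz bytes each.
 */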
282
283 static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
284 struct crypto_async_request *async,
285 struct scatterlist *src,
286 struct scatterlist *dst,
287 unsigned int cryptlen,
288 struct safexcel_cipher_req *sreq,
289 bool *should_complete, int *ret)
290 {
291 struct safexcel_result_desc *rdesc;
292 int ndesc = 0;
293
294 *ret = 0;
295
296 spin_lock_bh(&priv->ring[ring].egress_lock);
297 do {
298 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
299 if (IS_ERR(rdesc)) {
300 dev_err(priv->dev,
301 "cipher: result: could not retrieve the result descriptor\n");
302 *ret = PTR_ERR(rdesc);
303 break;
304 }
305
306 if (likely(!*ret))
307 *ret = safexcel_rdesc_check_errors(priv, rdesc);
308
309 ndesc++;
310 } while (!rdesc->last_seg);
311
312 safexcel_complete(priv, ring);
313 spin_unlock_bh(&priv->ring[ring].egress_lock);
314
315 if (src == dst) {
316 dma_unmap_sg(priv->dev, src,
317 sg_nents_for_len(src, cryptlen),
318 DMA_BIDIRECTIONAL);
319 } else {
320 dma_unmap_sg(priv->dev, src,
321 sg_nents_for_len(src, cryptlen),
322 DMA_TO_DEVICE);
323 dma_unmap_sg(priv->dev, dst,
324 sg_nents_for_len(dst, cryptlen),
325 DMA_FROM_DEVICE);
326 }
327
328 *should_complete = true;
329
330 return ndesc;
331 }
332
333 static int safexcel_aes_send(struct crypto_async_request *base, int ring,
334 struct safexcel_request *request,
335 struct safexcel_cipher_req *sreq,
336 struct scatterlist *src, struct scatterlist *dst,
337 unsigned int cryptlen, unsigned int assoclen,
338 unsigned int digestsize, u8 *iv, int *commands,
339 int *results)
340 {
341 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
342 struct safexcel_crypto_priv *priv = ctx->priv;
343 struct safexcel_command_desc *cdesc;
344 struct safexcel_result_desc *rdesc;
345 struct scatterlist *sg;
346 unsigned int totlen = cryptlen + assoclen;
347 int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
348 int i, ret = 0;
349
350 if (src == dst) {
351 nr_src = dma_map_sg(priv->dev, src,
352 sg_nents_for_len(src, totlen),
353 DMA_BIDIRECTIONAL);
354 nr_dst = nr_src;
355 if (!nr_src)
356 return -EINVAL;
357 } else {
358 nr_src = dma_map_sg(priv->dev, src,
359 sg_nents_for_len(src, totlen),
360 DMA_TO_DEVICE);
361 if (!nr_src)
362 return -EINVAL;
363
364 nr_dst = dma_map_sg(priv->dev, dst,
365 sg_nents_for_len(dst, totlen),
366 DMA_FROM_DEVICE);
367 if (!nr_dst) {
368 dma_unmap_sg(priv->dev, src,
369 sg_nents_for_len(src, totlen),
370 DMA_TO_DEVICE);
371 return -EINVAL;
372 }
373 }
374
375 memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
376
377 if (ctx->aead) {
378 memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
379 ctx->ipad, ctx->state_sz);
380 memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
381 ctx->opad, ctx->state_sz);
382 }
383
384 spin_lock_bh(&priv->ring[ring].egress_lock);
385
386 /* command descriptors */
387 for_each_sg(src, sg, nr_src, i) {
388 int len = sg_dma_len(sg);
389
390 /* Do not overflow the request */
391 if (queued - len < 0)
392 len = queued;
393
394 cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
395 sg_dma_address(sg), len, totlen,
396 ctx->base.ctxr_dma);
397 if (IS_ERR(cdesc)) {
398 /* No space left in the command descriptor ring */
399 ret = PTR_ERR(cdesc);
400 goto cdesc_rollback;
401 }
402 n_cdesc++;
403
404 if (n_cdesc == 1) {
405 safexcel_context_control(ctx, base, sreq, cdesc);
406 if (ctx->aead)
407 safexcel_aead_token(ctx, iv, cdesc,
408 sreq->direction, cryptlen,
409 assoclen, digestsize);
410 else
411 safexcel_skcipher_token(ctx, iv, cdesc,
412 cryptlen);
413 }
414
415 queued -= len;
416 if (!queued)
417 break;
418 }
419
420 /* result descriptors */
421 for_each_sg(dst, sg, nr_dst, i) {
422 bool first = !i, last = (i == nr_dst - 1);
423 u32 len = sg_dma_len(sg);
424
425 rdesc = safexcel_add_rdesc(priv, ring, first, last,
426 sg_dma_address(sg), len);
427 if (IS_ERR(rdesc)) {
428 /* No space left in the result descriptor ring */
429 ret = PTR_ERR(rdesc);
430 goto rdesc_rollback;
431 }
432 n_rdesc++;
433 }
434
435 spin_unlock_bh(&priv->ring[ring].egress_lock);
436
437 request->req = base;
438
439 *commands = n_cdesc;
440 *results = n_rdesc;
441 return 0;
442
443 rdesc_rollback:
444 for (i = 0; i < n_rdesc; i++)
445 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
446 cdesc_rollback:
447 for (i = 0; i < n_cdesc; i++)
448 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
449
450 spin_unlock_bh(&priv->ring[ring].egress_lock);
451
452 if (src == dst) {
453 dma_unmap_sg(priv->dev, src,
454 sg_nents_for_len(src, totlen),
455 DMA_BIDIRECTIONAL);
456 } else {
457 dma_unmap_sg(priv->dev, src,
458 sg_nents_for_len(src, totlen),
459 DMA_TO_DEVICE);
460 dma_unmap_sg(priv->dev, dst,
461 sg_nents_for_len(dst, totlen),
462 DMA_FROM_DEVICE);
463 }
464
465 return ret;
466 }
467
468 static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
469 int ring,
470 struct crypto_async_request *base,
471 bool *should_complete, int *ret)
472 {
473 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
474 struct safexcel_result_desc *rdesc;
475 int ndesc = 0, enq_ret;
476
477 *ret = 0;
478
479 spin_lock_bh(&priv->ring[ring].egress_lock);
480 do {
481 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
482 if (IS_ERR(rdesc)) {
483 dev_err(priv->dev,
484 "cipher: invalidate: could not retrieve the result descriptor\n");
485 *ret = PTR_ERR(rdesc);
486 break;
487 }
488
489 if (rdesc->result_data.error_code) {
490 dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
491 rdesc->result_data.error_code);
492 *ret = -EIO;
493 }
494
495 ndesc++;
496 } while (!rdesc->last_seg);
497
498 safexcel_complete(priv, ring);
499 spin_unlock_bh(&priv->ring[ring].egress_lock);
500
501 if (ctx->base.exit_inv) {
502 dma_pool_free(priv->context_pool, ctx->base.ctxr,
503 ctx->base.ctxr_dma);
504
505 *should_complete = true;
506
507 return ndesc;
508 }
509
510 ring = safexcel_select_ring(priv);
511 ctx->base.ring = ring;
512
513 spin_lock_bh(&priv->ring[ring].queue_lock);
514 enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
515 spin_unlock_bh(&priv->ring[ring].queue_lock);
516
517 if (enq_ret != -EINPROGRESS)
518 *ret = enq_ret;
519
520 queue_work(priv->ring[ring].workqueue,
521 &priv->ring[ring].work_data.work);
522
523 *should_complete = false;
524
525 return ndesc;
526 }
527
528 static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
529 int ring,
530 struct crypto_async_request *async,
531 bool *should_complete, int *ret)
532 {
533 struct skcipher_request *req = skcipher_request_cast(async);
534 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
535 int err;
536
537 if (sreq->needs_inv) {
538 sreq->needs_inv = false;
539 err = safexcel_handle_inv_result(priv, ring, async,
540 should_complete, ret);
541 } else {
542 err = safexcel_handle_req_result(priv, ring, async, req->src,
543 req->dst, req->cryptlen, sreq,
544 should_complete, ret);
545 }
546
547 return err;
548 }
549
550 static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
551 int ring,
552 struct crypto_async_request *async,
553 bool *should_complete, int *ret)
554 {
555 struct aead_request *req = aead_request_cast(async);
556 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
557 struct safexcel_cipher_req *sreq = aead_request_ctx(req);
558 int err;
559
560 if (sreq->needs_inv) {
561 sreq->needs_inv = false;
562 err = safexcel_handle_inv_result(priv, ring, async,
563 should_complete, ret);
564 } else {
565 err = safexcel_handle_req_result(priv, ring, async, req->src,
566 req->dst,
567 req->cryptlen + crypto_aead_authsize(tfm),
568 sreq, should_complete, ret);
569 }
570
571 return err;
572 }
573
574 static int safexcel_cipher_send_inv(struct crypto_async_request *base,
575 int ring, struct safexcel_request *request,
576 int *commands, int *results)
577 {
578 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
579 struct safexcel_crypto_priv *priv = ctx->priv;
580 int ret;
581
582 ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring,
583 request);
584 if (unlikely(ret))
585 return ret;
586
587 *commands = 1;
588 *results = 1;
589
590 return 0;
591 }
592
593 static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
594 struct safexcel_request *request,
595 int *commands, int *results)
596 {
597 struct skcipher_request *req = skcipher_request_cast(async);
598 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
599 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
600 struct safexcel_crypto_priv *priv = ctx->priv;
601 int ret;
602
603 BUG_ON(priv->version == EIP97 && sreq->needs_inv);
604
605 if (sreq->needs_inv)
606 ret = safexcel_cipher_send_inv(async, ring, request, commands,
607 results);
608 else
609 ret = safexcel_aes_send(async, ring, request, sreq, req->src,
610 req->dst, req->cryptlen, 0, 0, req->iv,
611 commands, results);
612 return ret;
613 }
614
615 static int safexcel_aead_send(struct crypto_async_request *async, int ring,
616 struct safexcel_request *request, int *commands,
617 int *results)
618 {
619 struct aead_request *req = aead_request_cast(async);
620 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
621 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
622 struct safexcel_cipher_req *sreq = aead_request_ctx(req);
623 struct safexcel_crypto_priv *priv = ctx->priv;
624 int ret;
625
626 BUG_ON(priv->version == EIP97 && sreq->needs_inv);
627
628 if (sreq->needs_inv)
629 ret = safexcel_cipher_send_inv(async, ring, request, commands,
630 results);
631 else
632 ret = safexcel_aes_send(async, ring, request, sreq, req->src,
633 req->dst, req->cryptlen, req->assoclen,
634 crypto_aead_authsize(tfm), req->iv,
635 commands, results);
636 return ret;
637 }
638
639 static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
640 struct crypto_async_request *base,
641 struct safexcel_cipher_req *sreq,
642 struct safexcel_inv_result *result)
643 {
644 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
645 struct safexcel_crypto_priv *priv = ctx->priv;
646 int ring = ctx->base.ring;
647
648 init_completion(&result->completion);
649
650 ctx = crypto_tfm_ctx(base->tfm);
651 ctx->base.exit_inv = true;
652 sreq->needs_inv = true;
653
654 spin_lock_bh(&priv->ring[ring].queue_lock);
655 crypto_enqueue_request(&priv->ring[ring].queue, base);
656 spin_unlock_bh(&priv->ring[ring].queue_lock);
657
658 queue_work(priv->ring[ring].workqueue,
659 &priv->ring[ring].work_data.work);
660
661 wait_for_completion(&result->completion);
662
663 if (result->error) {
664 dev_warn(priv->dev,
665 "cipher: sync: invalidate: completion error %d\n",
666 result->error);
667 return result->error;
668 }
669
670 return 0;
671 }
672
673 static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
674 {
675 EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
676 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
677 struct safexcel_inv_result result = {};
678
679 memset(req, 0, sizeof(struct skcipher_request));
680
681 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
682 safexcel_inv_complete, &result);
683 skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
684
685 return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
686 }
687
688 static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
689 {
690 EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
691 struct safexcel_cipher_req *sreq = aead_request_ctx(req);
692 struct safexcel_inv_result result = {};
693
694 memset(req, 0, sizeof(struct aead_request));
695
696 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
697 safexcel_inv_complete, &result);
698 aead_request_set_tfm(req, __crypto_aead_cast(tfm));
699
700 return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
701 }
702
703 static int safexcel_aes(struct crypto_async_request *base,
704 struct safexcel_cipher_req *sreq,
705 enum safexcel_cipher_direction dir, u32 mode)
706 {
707 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
708 struct safexcel_crypto_priv *priv = ctx->priv;
709 int ret, ring;
710
711 sreq->needs_inv = false;
712 sreq->direction = dir;
713 ctx->mode = mode;
714
715 if (ctx->base.ctxr) {
716 if (priv->version == EIP197 && ctx->base.needs_inv) {
717 sreq->needs_inv = true;
718 ctx->base.needs_inv = false;
719 }
720 } else {
721 ctx->base.ring = safexcel_select_ring(priv);
722 ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
723 EIP197_GFP_FLAGS(*base),
724 &ctx->base.ctxr_dma);
725 if (!ctx->base.ctxr)
726 return -ENOMEM;
727 }
728
729 ring = ctx->base.ring;
730
731 spin_lock_bh(&priv->ring[ring].queue_lock);
732 ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
733 spin_unlock_bh(&priv->ring[ring].queue_lock);
734
735 queue_work(priv->ring[ring].workqueue,
736 &priv->ring[ring].work_data.work);
737
738 return ret;
739 }
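/*
 * Illustrative note: the first request on a transform allocates the DMA
 * context record and binds the transform to a ring; later requests reuse
 * both and only schedule an invalidation first when a setkey flagged the
 * cached context as stale. Invalidation is an EIP197-only path, which is
 * why the send handlers BUG_ON() such a request on the EIP97.
 */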
740
741 static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
742 {
743 return safexcel_aes(&req->base, skcipher_request_ctx(req),
744 SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
745 }
746
747 static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
748 {
749 return safexcel_aes(&req->base, skcipher_request_ctx(req),
750 SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
751 }
752
753 static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
754 {
755 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
756 struct safexcel_alg_template *tmpl =
757 container_of(tfm->__crt_alg, struct safexcel_alg_template,
758 alg.skcipher.base);
759
760 crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
761 sizeof(struct safexcel_cipher_req));
762
763 ctx->priv = tmpl->priv;
764
765 ctx->base.send = safexcel_skcipher_send;
766 ctx->base.handle_result = safexcel_skcipher_handle_result;
767 return 0;
768 }
769
770 static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm)
771 {
772 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
773
774 memzero_explicit(ctx->key, sizeof(ctx->key));
775
776 /* context not allocated, skip invalidation */
777 if (!ctx->base.ctxr)
778 return -ENOMEM;
779
780 memzero_explicit(ctx->base.ctxr->data, sizeof(ctx->base.ctxr->data));
781 return 0;
782 }
783
784 static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
785 {
786 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
787 struct safexcel_crypto_priv *priv = ctx->priv;
788 int ret;
789
790 if (safexcel_cipher_cra_exit(tfm))
791 return;
792
793 if (priv->version == EIP197) {
794 ret = safexcel_skcipher_exit_inv(tfm);
795 if (ret)
796 dev_warn(priv->dev, "skcipher: invalidation error %d\n",
797 ret);
798 } else {
799 dma_pool_free(priv->context_pool, ctx->base.ctxr,
800 ctx->base.ctxr_dma);
801 }
802 }
803
804 static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
805 {
806 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
807 struct safexcel_crypto_priv *priv = ctx->priv;
808 int ret;
809
810 if (safexcel_cipher_cra_exit(tfm))
811 return;
812
813 if (priv->version == EIP197) {
814 ret = safexcel_aead_exit_inv(tfm);
815 if (ret)
816 dev_warn(priv->dev, "aead: invalidation error %d\n",
817 ret);
818 } else {
819 dma_pool_free(priv->context_pool, ctx->base.ctxr,
820 ctx->base.ctxr_dma);
821 }
822 }
823
824 struct safexcel_alg_template safexcel_alg_ecb_aes = {
825 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
826 .alg.skcipher = {
827 .setkey = safexcel_skcipher_aes_setkey,
828 .encrypt = safexcel_ecb_aes_encrypt,
829 .decrypt = safexcel_ecb_aes_decrypt,
830 .min_keysize = AES_MIN_KEY_SIZE,
831 .max_keysize = AES_MAX_KEY_SIZE,
832 .base = {
833 .cra_name = "ecb(aes)",
834 .cra_driver_name = "safexcel-ecb-aes",
835 .cra_priority = 300,
836 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
837 CRYPTO_ALG_KERN_DRIVER_ONLY,
838 .cra_blocksize = AES_BLOCK_SIZE,
839 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
840 .cra_alignmask = 0,
841 .cra_init = safexcel_skcipher_cra_init,
842 .cra_exit = safexcel_skcipher_cra_exit,
843 .cra_module = THIS_MODULE,
844 },
845 },
846 };
847
848 static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
849 {
850 return safexcel_aes(&req->base, skcipher_request_ctx(req),
851 SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
852 }
853
854 static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
855 {
856 return safexcel_aes(&req->base, skcipher_request_ctx(req),
857 SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
858 }
859
860 struct safexcel_alg_template safexcel_alg_cbc_aes = {
861 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
862 .alg.skcipher = {
863 .setkey = safexcel_skcipher_aes_setkey,
864 .encrypt = safexcel_cbc_aes_encrypt,
865 .decrypt = safexcel_cbc_aes_decrypt,
866 .min_keysize = AES_MIN_KEY_SIZE,
867 .max_keysize = AES_MAX_KEY_SIZE,
868 .ivsize = AES_BLOCK_SIZE,
869 .base = {
870 .cra_name = "cbc(aes)",
871 .cra_driver_name = "safexcel-cbc-aes",
872 .cra_priority = 300,
873 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
874 CRYPTO_ALG_KERN_DRIVER_ONLY,
875 .cra_blocksize = AES_BLOCK_SIZE,
876 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
877 .cra_alignmask = 0,
878 .cra_init = safexcel_skcipher_cra_init,
879 .cra_exit = safexcel_skcipher_cra_exit,
880 .cra_module = THIS_MODULE,
881 },
882 },
883 };
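/*
 * Illustrative usage sketch (assumes the template has been registered by
 * the driver core; "key", "buf" and "buflen" are placeholders): a kernel
 * user reaches this implementation through the generic skcipher API,
 * roughly:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 iv[AES_BLOCK_SIZE];
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	sg_init_one(&sg, buf, buflen);
 *	skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * Error handling and cleanup are omitted; whether this driver or another
 * "cbc(aes)" provider is selected depends on cra_priority.
 */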
884
885 static int safexcel_aead_encrypt(struct aead_request *req)
886 {
887 struct safexcel_cipher_req *creq = aead_request_ctx(req);
888
889 return safexcel_aes(&req->base, creq, SAFEXCEL_ENCRYPT,
890 CONTEXT_CONTROL_CRYPTO_MODE_CBC);
891 }
892
893 static int safexcel_aead_decrypt(struct aead_request *req)
894 {
895 struct safexcel_cipher_req *creq = aead_request_ctx(req);
896
897 return safexcel_aes(&req->base, creq, SAFEXCEL_DECRYPT,
898 CONTEXT_CONTROL_CRYPTO_MODE_CBC);
899 }
900
901 static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
902 {
903 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
904 struct safexcel_alg_template *tmpl =
905 container_of(tfm->__crt_alg, struct safexcel_alg_template,
906 alg.aead.base);
907
908 crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
909 sizeof(struct safexcel_cipher_req));
910
911 ctx->priv = tmpl->priv;
912
913 ctx->aead = true;
914 ctx->base.send = safexcel_aead_send;
915 ctx->base.handle_result = safexcel_aead_handle_result;
916 return 0;
917 }
918
919 static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
920 {
921 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
922
923 safexcel_aead_cra_init(tfm);
924 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
925 ctx->state_sz = SHA256_DIGEST_SIZE;
926 return 0;
927 }
928
929 struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
930 .type = SAFEXCEL_ALG_TYPE_AEAD,
931 .alg.aead = {
932 .setkey = safexcel_aead_aes_setkey,
933 .encrypt = safexcel_aead_encrypt,
934 .decrypt = safexcel_aead_decrypt,
935 .ivsize = AES_BLOCK_SIZE,
936 .maxauthsize = SHA256_DIGEST_SIZE,
937 .base = {
938 .cra_name = "authenc(hmac(sha256),cbc(aes))",
939 .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
940 .cra_priority = 300,
941 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
942 CRYPTO_ALG_KERN_DRIVER_ONLY,
943 .cra_blocksize = AES_BLOCK_SIZE,
944 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
945 .cra_alignmask = 0,
946 .cra_init = safexcel_aead_sha256_cra_init,
947 .cra_exit = safexcel_aead_cra_exit,
948 .cra_module = THIS_MODULE,
949 },
950 },
951 };
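/*
 * Illustrative usage sketch (placeholder names, allocation and error
 * handling mostly omitted): the AEAD side is driven through the generic
 * AEAD API, roughly:
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *	crypto_aead_setkey(tfm, authenc_key, authenc_keylen);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 *
 * "authenc_key" uses the layout parsed by crypto_authenc_extractkeys(),
 * and on encryption the destination must leave room for the SHA-256
 * digest appended after the ciphertext.
 */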
952
953 static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
954 {
955 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
956
957 safexcel_aead_cra_init(tfm);
958 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
959 ctx->state_sz = SHA256_DIGEST_SIZE;
960 return 0;
961 }
962
963 struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
964 .type = SAFEXCEL_ALG_TYPE_AEAD,
965 .alg.aead = {
966 .setkey = safexcel_aead_aes_setkey,
967 .encrypt = safexcel_aead_encrypt,
968 .decrypt = safexcel_aead_decrypt,
969 .ivsize = AES_BLOCK_SIZE,
970 .maxauthsize = SHA224_DIGEST_SIZE,
971 .base = {
972 .cra_name = "authenc(hmac(sha224),cbc(aes))",
973 .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
974 .cra_priority = 300,
975 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
976 CRYPTO_ALG_KERN_DRIVER_ONLY,
977 .cra_blocksize = AES_BLOCK_SIZE,
978 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
979 .cra_alignmask = 0,
980 .cra_init = safexcel_aead_sha224_cra_init,
981 .cra_exit = safexcel_aead_cra_exit,
982 .cra_module = THIS_MODULE,
983 },
984 },
985 };