/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>

#include "safexcel.h"
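/*
 * Per-transform cipher state: the processing direction, the cipher mode and
 * the cached AES key live here, next to the base safexcel context used by
 * the rest of the driver.
 */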
enum safexcel_cipher_direction {
        SAFEXCEL_ENCRYPT,
        SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        enum safexcel_cipher_direction direction;
        u32 mode;

        __le32 key[8];
        unsigned int key_len;
};
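/*
 * Build the token for a cipher request: in CBC mode copy the IV into the
 * command descriptor, then append a single "direction" token instruction
 * covering the whole payload.
 */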
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
                                  struct crypto_async_request *async,
                                  struct safexcel_command_desc *cdesc,
                                  u32 length)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_token *token;
        unsigned int offset = 0;

        if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
                offset = AES_BLOCK_SIZE / sizeof(u32);
                memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

                cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
        }

        token = (struct safexcel_token *)(cdesc->control_data.token + offset);

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
        token[0].instructions = EIP197_TOKEN_INS_LAST |
                                EIP197_TOKEN_INS_TYPE_CRYTO |
                                EIP197_TOKEN_INS_TYPE_OUTPUT;
}
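/*
 * Expand and cache the AES key. If the new key differs from the cached one,
 * the context is flagged as needing invalidation so the engine does not keep
 * using stale key material.
 */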
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
                               unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_aes_ctx aes;
        int ret, i;

        ret = crypto_aes_expand_key(&aes, key, len);
        if (ret) {
                crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return ret;
        }

        /* A key change requires the cached context to be invalidated. */
        for (i = 0; i < len / sizeof(u32); i++) {
                if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
                        ctx->base.needs_inv = true;
                        break;
                }
        }

        for (i = 0; i < len / sizeof(u32); i++)
                ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

        ctx->key_len = len;

        memzero_explicit(&aes, sizeof(aes));

        return 0;
}
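/*
 * Fill the per-request control words: processing direction, key enable,
 * cipher mode and the AES variant matching the cached key length.
 */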
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
                                    struct safexcel_command_desc *cdesc)
{
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ctrl_size;

        if (ctx->direction == SAFEXCEL_ENCRYPT)
                cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
        else
                cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
        cdesc->control_data.control1 |= ctx->mode;

        switch (ctx->key_len) {
        case AES_KEYSIZE_128:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
                ctrl_size = 4;
                break;
        case AES_KEYSIZE_192:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
                ctrl_size = 6;
                break;
        case AES_KEYSIZE_256:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
                ctrl_size = 8;
                break;
        default:
                dev_err(priv->dev, "aes keysize not supported: %u\n",
                        ctx->key_len);
                return -EINVAL;
        }
        cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

        return 0;
}
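/*
 * Completion path for a cipher request: walk the result descriptors of this
 * request, report any engine error, then unmap the scatterlists.
 */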
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_result_desc *rdesc;
        int ndesc = 0;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        do {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
                                "cipher: result: could not retrieve the result descriptor\n");
                        *ret = PTR_ERR(rdesc);
                        break;
                }

                if (rdesc->result_data.error_code) {
                        dev_err(priv->dev,
                                "cipher: result: result descriptor error (%d)\n",
                                rdesc->result_data.error_code);
                        *ret = -EIO;
                }

                ndesc++;
        } while (!rdesc->last_seg);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (req->src == req->dst) {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_TO_DEVICE);
                dma_unmap_sg(priv->dev, req->dst,
                             sg_nents_for_len(req->dst, req->cryptlen),
                             DMA_FROM_DEVICE);
        }

        *should_complete = true;

        return ndesc;
}
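/*
 * Queue one AES request to the engine: map the scatterlists, copy the key
 * into the hardware context record, then emit one command descriptor per
 * source segment and one result descriptor per destination segment.
 */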
static int safexcel_aes_send(struct crypto_async_request *async,
                             int ring, struct safexcel_request *request,
                             int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
        int i, ret = 0;

        if (req->src == req->dst) {
                nr_src = dma_map_sg(priv->dev, req->src,
                                    sg_nents_for_len(req->src, req->cryptlen),
                                    DMA_BIDIRECTIONAL);
                nr_dst = nr_src;
                if (!nr_src)
                        return -EINVAL;
        } else {
                nr_src = dma_map_sg(priv->dev, req->src,
                                    sg_nents_for_len(req->src, req->cryptlen),
                                    DMA_TO_DEVICE);
                if (!nr_src)
                        return -EINVAL;

                nr_dst = dma_map_sg(priv->dev, req->dst,
                                    sg_nents_for_len(req->dst, req->cryptlen),
                                    DMA_FROM_DEVICE);
                if (!nr_dst) {
                        dma_unmap_sg(priv->dev, req->src,
                                     sg_nents_for_len(req->src, req->cryptlen),
                                     DMA_TO_DEVICE);
                        return -EINVAL;
                }
        }

        memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* command descriptors */
        for_each_sg(req->src, sg, nr_src, i) {
                int len = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - len < 0)
                        len = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
                                           sg_dma_address(sg), len, req->cryptlen,
                                           ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        /* No space left in the command descriptor ring */
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                /* Control data and token only go in the first descriptor. */
                if (n_cdesc == 1) {
                        safexcel_context_control(ctx, cdesc);
                        safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
                }

                queued -= len;
                if (!queued)
                        break;
        }

        /* result descriptors */
        for_each_sg(req->dst, sg, nr_dst, i) {
                bool first = !i, last = (i == nr_dst - 1);
                u32 len = sg_dma_len(sg);

                rdesc = safexcel_add_rdesc(priv, ring, first, last,
                                           sg_dma_address(sg), len);
                if (IS_ERR(rdesc)) {
                        /* No space left in the result descriptor ring */
                        ret = PTR_ERR(rdesc);
                        goto rdesc_rollback;
                }
                n_rdesc++;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        request->req = &req->base;
        ctx->base.handle_result = safexcel_handle_result;

        *commands = n_cdesc;
        *results = n_rdesc;

        return 0;

rdesc_rollback:
        for (i = 0; i < n_rdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (req->src == req->dst) {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_TO_DEVICE);
                dma_unmap_sg(priv->dev, req->dst,
                             sg_nents_for_len(req->dst, req->cryptlen),
                             DMA_FROM_DEVICE);
        }

        return ret;
}
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_result_desc *rdesc;
        int ndesc = 0, enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        do {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
                                "cipher: invalidate: could not retrieve the result descriptor\n");
                        *ret = PTR_ERR(rdesc);
                        break;
                }

                if (rdesc->result_data.error_code) {
                        dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
                                rdesc->result_data.error_code);
                        *ret = -EIO;
                }

                ndesc++;
        } while (!rdesc->last_seg);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;

                return ndesc;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;
        ctx->base.needs_inv = false;
        ctx->base.send = safexcel_aes_send;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        *should_complete = false;

        return ndesc;
}
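/*
 * Ask the engine to invalidate the cached context record for this transform;
 * completion is handled by safexcel_handle_inv_result().
 */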
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
                                    int ring, struct safexcel_request *request,
                                    int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        ctx->base.handle_result = safexcel_handle_inv_result;

        ret = safexcel_invalidate_cache(async, &ctx->base, priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}
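/*
 * Synchronous context invalidation used at transform teardown: build a dummy
 * skcipher request on the stack, queue it, and wait for its completion
 * callback before the context is released.
 */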
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct skcipher_request req;
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(&req, 0, sizeof(struct skcipher_request));

        /* create invalidation request */
        init_completion(&result.completion);
        skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      safexcel_inv_complete, &result);

        skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
        ctx = crypto_tfm_ctx(req.base.tfm);
        ctx->base.exit_inv = true;
        ctx->base.send = safexcel_cipher_send_inv;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        wait_for_completion_interruptible(&result.completion);

        if (result.error) {
                dev_warn(priv->dev,
                         "cipher: sync: invalidate: completion error %d\n",
                         result.error);
                return result.error;
        }

        return 0;
}
static int safexcel_aes(struct skcipher_request *req,
                        enum safexcel_cipher_direction dir, u32 mode)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        ctx->direction = dir;
        ctx->mode = mode;

        if (ctx->base.ctxr) {
                if (ctx->base.needs_inv)
                        ctx->base.send = safexcel_cipher_send_inv;
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.send = safexcel_aes_send;

                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(req->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        return ret;
}
static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_ENCRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_DECRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(tfm->__crt_alg, struct safexcel_alg_template,
                             alg.skcipher.base);

        ctx->priv = tmpl->priv;

        return 0;
}
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        memzero_explicit(ctx->key, 8 * sizeof(u32));

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

        ret = safexcel_cipher_exit_inv(tfm);
        if (ret)
                dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
}
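/*
 * Algorithm templates registered with the crypto API. Both entries route
 * through the common safexcel_aes() path; only the mode (ECB vs. CBC) and
 * the IV size differ.
 */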
struct safexcel_alg_template safexcel_alg_ecb_aes = {
        .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
        .alg.skcipher = {
                .setkey = safexcel_aes_setkey,
                .encrypt = safexcel_ecb_aes_encrypt,
                .decrypt = safexcel_ecb_aes_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "safexcel-ecb-aes",
                        .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
                        .cra_init = safexcel_skcipher_cra_init,
                        .cra_exit = safexcel_skcipher_cra_exit,
                        .cra_module = THIS_MODULE,
                },
        },
};
static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_ENCRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_DECRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}
struct safexcel_alg_template safexcel_alg_cbc_aes = {
        .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
        .alg.skcipher = {
                .setkey = safexcel_aes_setkey,
                .encrypt = safexcel_cbc_aes_encrypt,
                .decrypt = safexcel_cbc_aes_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .base = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "safexcel-cbc-aes",
                        .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
                        .cra_init = safexcel_skcipher_cra_init,
                        .cra_exit = safexcel_skcipher_cra_exit,
                        .cra_module = THIS_MODULE,
                },
        },
};