// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT SZ_32M

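/*
 * Per-mode cipher configuration: each entry in sec_c_alg_cfgs below
 * carries the algorithm, mode and key-length encodings that
 * sec_alg_skcipher_init_template() packs into the hardware
 * descriptor (BD) words.
 */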
struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};

static const struct sec_c_alg_cfg sec_c_alg_cfgs[] =  {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};

/*
 * Mutex used to ensure safe operation of reference count of
 * alg providers
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}

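/*
 * Convert a DMA-mapped scatterlist into the hardware's own scatter
 * gather format. Each hardware SGL holds up to SEC_MAX_SGE_NUM
 * entries; longer lists become a chain linked by next_sgl (DMA
 * address) and next (CPU pointer), with all descriptors allocated
 * from the device's dma_pool.
 */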
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   GFP_KERNEL, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	sgl_current = *sec_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		dma_pool_free(info->hw_sgl_pool, sgl_current,
			      sgl_current->next_sgl);
		sgl_current = sgl_next;
	}
	*psec_sgl = 0;

	return ret;
}

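/*
 * Walk a hardware SGL chain, returning each descriptor to the pool.
 * The DMA handle of the element being freed must be latched before
 * dma_pool_free() is called, hence the psec_sgl/sgl_next_dma shuffle.
 */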
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}

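/*
 * The key lives in a DMA-coherent buffer so the engine can fetch it
 * from the address baked into the BD template. On rekey the existing
 * buffer appears to be cleared and reused rather than reallocated.
 */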
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}

static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

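/*
 * The DES/3DES setkey paths below rely on the core helpers for
 * weak-key checking; the "a ?: b" form returns the verification error
 * when there is one and otherwise falls through to the common setkey.
 */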
static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}

static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}

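/*
 * Push the elements of a request to the hardware queue, or park them
 * on the software queue when ordering against in-flight elements
 * (IV chaining) must be preserved.
 */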
/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to hardware queue only under following circumstances
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as new IV - (check software queue empty
		 *    to maintain order)
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue which
		 * is then emptied as requests complete
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}

err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}

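/*
 * Per-element completion. For chained modes the IV for the following
 * element is recovered here: CBC copies the final ciphertext block out
 * of the element just processed, CTR advances the counter with
 * crypto_inc(). The softqueue and backlog are then kicked, and the
 * skcipher completion fires only once every element of the request has
 * been seen.
 */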
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester
		 * so it should be able to handle it appropriately.
		 */
	}

	mutex_lock(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
		    backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			backlog_req->req_base->complete(backlog_req->req_base,
							-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	mutex_unlock(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The dance with the lock is needed because the completion may
	 * free the request, and the lock lives inside it.
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skreq->base.complete(&skreq->base, sec_req->err);
	}
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}

static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps)
{
	size_t *sizes;
	int i;

	/* Split into suitable sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}

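/*
 * DMA map the caller's scatterlist once, then use sg_split() to
 * produce one sub-scatterlist per element so that each BD can be
 * built and completed independently.
 */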
static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* output the scatter list before and after this */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}

static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), GFP_KERNEL);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}

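/*
 * Common encrypt/decrypt path: split and DMA map the source (and, for
 * out-of-place requests, the destination), map the IV, build every
 * element up front, then commit the whole request under queuelock so
 * that it either queues completely or fails cleanly.
 */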
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev);
	if (ret)
		goto err_free_split_sizes;

	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in seq_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization.
	 * In the chaining case we can't use a dma pool bounce buffer
	 * but in the case where we know there is no chaining we can use
	 * one.
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully clean up after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	mutex_lock(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog. If there is backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) > steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			mutex_unlock(&queue->queuelock);
			goto out;
		}

		mutex_unlock(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	mutex_unlock(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);

	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_TO_DEVICE);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}

static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	mutex_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}

	sec_queue_stop_release(ctx->queue);
}

static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}

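/*
 * Registered at priority 4001 so these offloaded implementations
 * outrank the generic software ciphers. Modes that chain an IV across
 * elements (the CBC and CTR variants) use the _with_queue init so a
 * software queue exists for ordering dependent elements.
 */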
static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
	/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};

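/*
 * Several SEC instances may probe: only the first caller registers the
 * algorithms and only the last one unregisters them, tracked via
 * active_devs under algs_lock.
 */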
int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}

void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
	mutex_unlock(&algs_lock);
}