// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysize in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 */
15 static int sun4i_ss_opti_poll(struct skcipher_request
*areq
)
17 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
18 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
19 struct sun4i_ss_ctx
*ss
= op
->ss
;
20 unsigned int ivsize
= crypto_skcipher_ivsize(tfm
);
21 struct sun4i_cipher_req_ctx
*ctx
= skcipher_request_ctx(areq
);
23 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
24 u32 rx_cnt
= SS_RX_DEFAULT
;
30 unsigned int ileft
= areq
->cryptlen
;
31 unsigned int oleft
= areq
->cryptlen
;
33 struct sg_mapping_iter mi
, mo
;
34 unsigned int oi
, oo
; /* offset for in and out */
40 if (!areq
->src
|| !areq
->dst
) {
41 dev_err_ratelimited(ss
->dev
, "ERROR: Some SGs are NULL\n");
45 spin_lock_irqsave(&ss
->slock
, flags
);
47 for (i
= 0; i
< op
->keylen
; i
+= 4)
48 writel(*(op
->key
+ i
/ 4), ss
->base
+ SS_KEY0
+ i
);
51 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
52 v
= *(u32
*)(areq
->iv
+ i
* 4);
53 writel(v
, ss
->base
+ SS_IV0
+ i
* 4);
56 writel(mode
, ss
->base
+ SS_CTL
);
58 sg_miter_start(&mi
, areq
->src
, sg_nents(areq
->src
),
59 SG_MITER_FROM_SG
| SG_MITER_ATOMIC
);
60 sg_miter_start(&mo
, areq
->dst
, sg_nents(areq
->dst
),
61 SG_MITER_TO_SG
| SG_MITER_ATOMIC
);
64 if (!mi
.addr
|| !mo
.addr
) {
65 dev_err_ratelimited(ss
->dev
, "ERROR: sg_miter return null\n");
70 ileft
= areq
->cryptlen
/ 4;
71 oleft
= areq
->cryptlen
/ 4;
75 todo
= min3(rx_cnt
, ileft
, (mi
.length
- oi
) / 4);
78 writesl(ss
->base
+ SS_RXFIFO
, mi
.addr
+ oi
, todo
);
81 if (oi
== mi
.length
) {
86 spaces
= readl(ss
->base
+ SS_FCSR
);
87 rx_cnt
= SS_RXFIFO_SPACES(spaces
);
88 tx_cnt
= SS_TXFIFO_SPACES(spaces
);
90 todo
= min3(tx_cnt
, oleft
, (mo
.length
- oo
) / 4);
93 readsl(ss
->base
+ SS_TXFIFO
, mo
.addr
+ oo
, todo
);
96 if (oo
== mo
.length
) {
103 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
104 v
= readl(ss
->base
+ SS_IV0
+ i
* 4);
105 *(u32
*)(areq
->iv
+ i
* 4) = v
;
112 writel(0, ss
->base
+ SS_CTL
);
113 spin_unlock_irqrestore(&ss
->slock
, flags
);
117 /* Generic function that support SG with size not multiple of 4 */
118 static int sun4i_ss_cipher_poll(struct skcipher_request
*areq
)
120 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
121 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
122 struct sun4i_ss_ctx
*ss
= op
->ss
;
124 struct scatterlist
*in_sg
= areq
->src
;
125 struct scatterlist
*out_sg
= areq
->dst
;
126 unsigned int ivsize
= crypto_skcipher_ivsize(tfm
);
127 struct sun4i_cipher_req_ctx
*ctx
= skcipher_request_ctx(areq
);
128 struct skcipher_alg
*alg
= crypto_skcipher_alg(tfm
);
129 struct sun4i_ss_alg_template
*algt
;
130 u32 mode
= ctx
->mode
;
131 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
132 u32 rx_cnt
= SS_RX_DEFAULT
;
138 unsigned int ileft
= areq
->cryptlen
;
139 unsigned int oleft
= areq
->cryptlen
;
141 struct sg_mapping_iter mi
, mo
;
142 unsigned int oi
, oo
; /* offset for in and out */
143 char buf
[4 * SS_RX_MAX
];/* buffer for linearize SG src */
144 char bufo
[4 * SS_TX_MAX
]; /* buffer for linearize SG dst */
145 unsigned int ob
= 0; /* offset in buf */
146 unsigned int obo
= 0; /* offset in bufo*/
147 unsigned int obl
= 0; /* length of data in bufo */
154 if (!areq
->src
|| !areq
->dst
) {
155 dev_err_ratelimited(ss
->dev
, "ERROR: Some SGs are NULL\n");
159 algt
= container_of(alg
, struct sun4i_ss_alg_template
, alg
.crypto
);
160 if (areq
->cryptlen
% algt
->alg
.crypto
.base
.cra_blocksize
)
161 need_fallback
= true;
164 * if we have only SGs with size multiple of 4,
165 * we can use the SS optimized function
167 while (in_sg
&& no_chunk
== 1) {
168 if (in_sg
->length
% 4)
170 in_sg
= sg_next(in_sg
);
172 while (out_sg
&& no_chunk
== 1) {
173 if (out_sg
->length
% 4)
175 out_sg
= sg_next(out_sg
);
178 if (no_chunk
== 1 && !need_fallback
)
179 return sun4i_ss_opti_poll(areq
);
182 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq
, op
->fallback_tfm
);
183 skcipher_request_set_sync_tfm(subreq
, op
->fallback_tfm
);
184 skcipher_request_set_callback(subreq
, areq
->base
.flags
, NULL
,
186 skcipher_request_set_crypt(subreq
, areq
->src
, areq
->dst
,
187 areq
->cryptlen
, areq
->iv
);
188 if (ctx
->mode
& SS_DECRYPTION
)
189 err
= crypto_skcipher_decrypt(subreq
);
191 err
= crypto_skcipher_encrypt(subreq
);
192 skcipher_request_zero(subreq
);
196 spin_lock_irqsave(&ss
->slock
, flags
);
198 for (i
= 0; i
< op
->keylen
; i
+= 4)
199 writel(*(op
->key
+ i
/ 4), ss
->base
+ SS_KEY0
+ i
);
202 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
203 v
= *(u32
*)(areq
->iv
+ i
* 4);
204 writel(v
, ss
->base
+ SS_IV0
+ i
* 4);
207 writel(mode
, ss
->base
+ SS_CTL
);
209 sg_miter_start(&mi
, areq
->src
, sg_nents(areq
->src
),
210 SG_MITER_FROM_SG
| SG_MITER_ATOMIC
);
211 sg_miter_start(&mo
, areq
->dst
, sg_nents(areq
->dst
),
212 SG_MITER_TO_SG
| SG_MITER_ATOMIC
);
215 if (!mi
.addr
|| !mo
.addr
) {
216 dev_err_ratelimited(ss
->dev
, "ERROR: sg_miter return null\n");
220 ileft
= areq
->cryptlen
;
221 oleft
= areq
->cryptlen
;
228 * todo is the number of consecutive 4byte word that we
229 * can read from current SG
231 todo
= min3(rx_cnt
, ileft
/ 4, (mi
.length
- oi
) / 4);
233 writesl(ss
->base
+ SS_RXFIFO
, mi
.addr
+ oi
,
239 * not enough consecutive bytes, so we need to
240 * linearize in buf. todo is in bytes
241 * After that copy, if we have a multiple of 4
242 * we need to be able to write all buf in one
243 * pass, so it is why we min() with rx_cnt
245 todo
= min3(rx_cnt
* 4 - ob
, ileft
,
247 memcpy(buf
+ ob
, mi
.addr
+ oi
, todo
);
252 writesl(ss
->base
+ SS_RXFIFO
, buf
,
257 if (oi
== mi
.length
) {
263 spaces
= readl(ss
->base
+ SS_FCSR
);
264 rx_cnt
= SS_RXFIFO_SPACES(spaces
);
265 tx_cnt
= SS_TXFIFO_SPACES(spaces
);
266 dev_dbg(ss
->dev
, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
268 oi
, mi
.length
, ileft
, areq
->cryptlen
, rx_cnt
,
269 oo
, mo
.length
, oleft
, areq
->cryptlen
, tx_cnt
, ob
);
273 /* todo in 4bytes word */
274 todo
= min3(tx_cnt
, oleft
/ 4, (mo
.length
- oo
) / 4);
276 readsl(ss
->base
+ SS_TXFIFO
, mo
.addr
+ oo
, todo
);
279 if (oo
== mo
.length
) {
285 * read obl bytes in bufo, we read at maximum for
286 * emptying the device
288 readsl(ss
->base
+ SS_TXFIFO
, bufo
, tx_cnt
);
293 * how many bytes we can copy ?
294 * no more than remaining SG size
295 * no more than remaining buffer
296 * no need to test against oleft
298 todo
= min(mo
.length
- oo
, obl
- obo
);
299 memcpy(mo
.addr
+ oo
, bufo
+ obo
, todo
);
303 if (oo
== mo
.length
) {
308 /* bufo must be fully used here */
312 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
313 v
= readl(ss
->base
+ SS_IV0
+ i
* 4);
314 *(u32
*)(areq
->iv
+ i
* 4) = v
;
321 writel(0, ss
->base
+ SS_CTL
);
322 spin_unlock_irqrestore(&ss
->slock
, flags
);
328 int sun4i_ss_cbc_aes_encrypt(struct skcipher_request
*areq
)
330 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
331 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
332 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
334 rctx
->mode
= SS_OP_AES
| SS_CBC
| SS_ENABLED
| SS_ENCRYPTION
|
336 return sun4i_ss_cipher_poll(areq
);
339 int sun4i_ss_cbc_aes_decrypt(struct skcipher_request
*areq
)
341 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
342 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
343 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
345 rctx
->mode
= SS_OP_AES
| SS_CBC
| SS_ENABLED
| SS_DECRYPTION
|
347 return sun4i_ss_cipher_poll(areq
);
351 int sun4i_ss_ecb_aes_encrypt(struct skcipher_request
*areq
)
353 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
354 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
355 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
357 rctx
->mode
= SS_OP_AES
| SS_ECB
| SS_ENABLED
| SS_ENCRYPTION
|
359 return sun4i_ss_cipher_poll(areq
);
362 int sun4i_ss_ecb_aes_decrypt(struct skcipher_request
*areq
)
364 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
365 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
366 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
368 rctx
->mode
= SS_OP_AES
| SS_ECB
| SS_ENABLED
| SS_DECRYPTION
|
370 return sun4i_ss_cipher_poll(areq
);
374 int sun4i_ss_cbc_des_encrypt(struct skcipher_request
*areq
)
376 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
377 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
378 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
380 rctx
->mode
= SS_OP_DES
| SS_CBC
| SS_ENABLED
| SS_ENCRYPTION
|
382 return sun4i_ss_cipher_poll(areq
);
385 int sun4i_ss_cbc_des_decrypt(struct skcipher_request
*areq
)
387 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
388 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
389 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
391 rctx
->mode
= SS_OP_DES
| SS_CBC
| SS_ENABLED
| SS_DECRYPTION
|
393 return sun4i_ss_cipher_poll(areq
);
397 int sun4i_ss_ecb_des_encrypt(struct skcipher_request
*areq
)
399 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
400 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
401 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
403 rctx
->mode
= SS_OP_DES
| SS_ECB
| SS_ENABLED
| SS_ENCRYPTION
|
405 return sun4i_ss_cipher_poll(areq
);
408 int sun4i_ss_ecb_des_decrypt(struct skcipher_request
*areq
)
410 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
411 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
412 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
414 rctx
->mode
= SS_OP_DES
| SS_ECB
| SS_ENABLED
| SS_DECRYPTION
|
416 return sun4i_ss_cipher_poll(areq
);
420 int sun4i_ss_cbc_des3_encrypt(struct skcipher_request
*areq
)
422 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
423 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
424 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
426 rctx
->mode
= SS_OP_3DES
| SS_CBC
| SS_ENABLED
| SS_ENCRYPTION
|
428 return sun4i_ss_cipher_poll(areq
);
431 int sun4i_ss_cbc_des3_decrypt(struct skcipher_request
*areq
)
433 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
434 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
435 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
437 rctx
->mode
= SS_OP_3DES
| SS_CBC
| SS_ENABLED
| SS_DECRYPTION
|
439 return sun4i_ss_cipher_poll(areq
);
443 int sun4i_ss_ecb_des3_encrypt(struct skcipher_request
*areq
)
445 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
446 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
447 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
449 rctx
->mode
= SS_OP_3DES
| SS_ECB
| SS_ENABLED
| SS_ENCRYPTION
|
451 return sun4i_ss_cipher_poll(areq
);
454 int sun4i_ss_ecb_des3_decrypt(struct skcipher_request
*areq
)
456 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
457 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
458 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
460 rctx
->mode
= SS_OP_3DES
| SS_ECB
| SS_ENABLED
| SS_DECRYPTION
|
462 return sun4i_ss_cipher_poll(areq
);
465 int sun4i_ss_cipher_init(struct crypto_tfm
*tfm
)
467 struct sun4i_tfm_ctx
*op
= crypto_tfm_ctx(tfm
);
468 struct sun4i_ss_alg_template
*algt
;
469 const char *name
= crypto_tfm_alg_name(tfm
);
471 memset(op
, 0, sizeof(struct sun4i_tfm_ctx
));
473 algt
= container_of(tfm
->__crt_alg
, struct sun4i_ss_alg_template
,
477 crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm
),
478 sizeof(struct sun4i_cipher_req_ctx
));
480 op
->fallback_tfm
= crypto_alloc_sync_skcipher(name
, 0, CRYPTO_ALG_NEED_FALLBACK
);
481 if (IS_ERR(op
->fallback_tfm
)) {
482 dev_err(op
->ss
->dev
, "ERROR: Cannot allocate fallback for %s %ld\n",
483 name
, PTR_ERR(op
->fallback_tfm
));
484 return PTR_ERR(op
->fallback_tfm
);
490 void sun4i_ss_cipher_exit(struct crypto_tfm
*tfm
)
492 struct sun4i_tfm_ctx
*op
= crypto_tfm_ctx(tfm
);
493 crypto_free_sync_skcipher(op
->fallback_tfm
);
496 /* check and set the AES key, prepare the mode to be used */
497 int sun4i_ss_aes_setkey(struct crypto_skcipher
*tfm
, const u8
*key
,
500 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
501 struct sun4i_ss_ctx
*ss
= op
->ss
;
505 op
->keymode
= SS_AES_128BITS
;
508 op
->keymode
= SS_AES_192BITS
;
511 op
->keymode
= SS_AES_256BITS
;
514 dev_err(ss
->dev
, "ERROR: Invalid keylen %u\n", keylen
);
515 crypto_skcipher_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
519 memcpy(op
->key
, key
, keylen
);
521 crypto_sync_skcipher_clear_flags(op
->fallback_tfm
, CRYPTO_TFM_REQ_MASK
);
522 crypto_sync_skcipher_set_flags(op
->fallback_tfm
, tfm
->base
.crt_flags
& CRYPTO_TFM_REQ_MASK
);
524 return crypto_sync_skcipher_setkey(op
->fallback_tfm
, key
, keylen
);
527 /* check and set the DES key, prepare the mode to be used */
528 int sun4i_ss_des_setkey(struct crypto_skcipher
*tfm
, const u8
*key
,
531 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
532 struct sun4i_ss_ctx
*ss
= op
->ss
;
534 u32 tmp
[DES_EXPKEY_WORDS
];
537 if (unlikely(keylen
!= DES_KEY_SIZE
)) {
538 dev_err(ss
->dev
, "Invalid keylen %u\n", keylen
);
539 crypto_skcipher_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
543 flags
= crypto_skcipher_get_flags(tfm
);
545 ret
= des_ekey(tmp
, key
);
546 if (unlikely(!ret
) && (flags
& CRYPTO_TFM_REQ_FORBID_WEAK_KEYS
)) {
547 crypto_skcipher_set_flags(tfm
, CRYPTO_TFM_RES_WEAK_KEY
);
548 dev_dbg(ss
->dev
, "Weak key %u\n", keylen
);
553 memcpy(op
->key
, key
, keylen
);
555 crypto_sync_skcipher_clear_flags(op
->fallback_tfm
, CRYPTO_TFM_REQ_MASK
);
556 crypto_sync_skcipher_set_flags(op
->fallback_tfm
, tfm
->base
.crt_flags
& CRYPTO_TFM_REQ_MASK
);
558 return crypto_sync_skcipher_setkey(op
->fallback_tfm
, key
, keylen
);
561 /* check and set the 3DES key, prepare the mode to be used */
562 int sun4i_ss_des3_setkey(struct crypto_skcipher
*tfm
, const u8
*key
,
565 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
568 err
= des3_verify_key(tfm
, key
);
573 memcpy(op
->key
, key
, keylen
);
575 crypto_sync_skcipher_clear_flags(op
->fallback_tfm
, CRYPTO_TFM_REQ_MASK
);
576 crypto_sync_skcipher_set_flags(op
->fallback_tfm
, tfm
->base
.crt_flags
& CRYPTO_TFM_REQ_MASK
);
578 return crypto_sync_skcipher_setkey(op
->fallback_tfm
, key
, keylen
);