/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keys in CBC and ECB mode, and also for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"

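/*
 * sun4i_ss_opti_poll() is the fast path: it is used only when every
 * scatterlist entry of both src and dst has a length that is a multiple
 * of 4, so data can be pushed to and pulled from the FIFOs in whole
 * 32-bit words without any intermediate copy.
 */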
static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_bh(&ss->slock);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->nbytes / 4;
	oleft = areq->nbytes / 4;
	oi = 0;
	oo = 0;
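	/*
	 * PIO transfer loop: feed the RX FIFO with as many 32-bit words as
	 * it advertises room for, re-read the FCSR register to learn the
	 * current FIFO space/fill levels, then drain the TX FIFO, until all
	 * output words have been collected.
	 */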
	do {
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo > 0) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo > 0) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft > 0);

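	/*
	 * Read the IV registers back into the request so that chained
	 * operations can continue from the updated IV.
	 */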
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);
	return err;
}

/* Generic function that supports SGs with sizes that are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	char buf[4 * SS_RX_MAX]; /* buffer for linearizing the SG src */
	char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the SG dst */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo */
	unsigned int obl = 0; /* length of data in bufo */

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * If every SG has a size that is a multiple of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length % 4) != 0)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length % 4) != 0)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_bh(&ss->slock);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->nbytes;
	oleft = areq->nbytes;
	oi = 0;
	oo = 0;

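	/*
	 * Generic PIO loop: whenever the current SG entry does not contain a
	 * whole number of 32-bit words, the data is staged through buf (for
	 * input) or bufo (for output) so that the FIFOs are always accessed
	 * in full words, while the scatterlists are walked byte by byte.
	 */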
	while (oleft > 0) {
		if (ileft > 0) {
			/*
			 * todo is the number of consecutive 4-byte words
			 * that we can read from the current SG
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo > 0 && ob == 0) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf; here todo is in bytes. After
				 * this copy, once we have a multiple of 4 we
				 * must be able to write the whole buf in one
				 * pass, which is why we min() with rx_cnt.
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (ob % 4 == 0) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->nbytes, rx_cnt,
			oo, mo.length, oleft, areq->nbytes, tx_cnt, ob);

		if (tx_cnt == 0)
			continue;
		/* todo is in 4-byte words */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo > 0) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; we read as much as
			 * possible in order to empty the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy?
				 * No more than the remaining SG size and
				 * no more than the remaining buffer;
				 * no need to test against oleft.
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
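	/*
	 * Read the IV registers back into the request so that chained
	 * operations can continue from the updated IV.
	 */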
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);

	return err;
}

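/*
 * The mode-specific entry points below differ only in the value they store
 * in rctx->mode (algorithm, chaining mode, direction and key size); the
 * actual transfer is always performed by sun4i_ss_cipher_poll(). They are
 * not called directly but reached through the generic crypto API, e.g. by
 * a request issued against a "cbc(aes)" transform.
 */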
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	op->ss = algt->ss;

	tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);

	return 0;
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	u32 flags;
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (unlikely(keylen != DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	flags = crypto_ablkcipher_get_flags(tfm);

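	/*
	 * des_ekey() returns 0 when the key is one of the known weak DES
	 * keys; reject it only if the user requested weak-key checking.
	 */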
	ret = des_ekey(tmp, key);
	if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
		dev_dbg(ss->dev, "Weak key %u\n", keylen);
		return -EINVAL;
	}

	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}