]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
Merge tag 'samsung-fixes-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/krzk...
[mirror_ubuntu-bionic-kernel.git] / drivers / crypto / sunxi-ss / sun4i-ss-cipher.c
1 /*
2 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
3 *
4 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
5 *
6 * This file add support for AES cipher with 128,192,256 bits
7 * keysize in CBC and ECB mode.
8 * Add support also for DES and 3DES in CBC and ECB mode.
9 *
10 * You could find the datasheet in Documentation/arm/sunxi/README
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 */
17 #include "sun4i-ss.h"
18
/*
 * sun4i_ss_opti_poll() - optimized polling cipher path.
 *
 * Fast path used when every source and destination SG entry has a length
 * that is a multiple of 4 bytes: data is moved between the SGs and the SS
 * RX/TX FIFOs as whole 32-bit words, with no intermediate bounce buffer.
 *
 * Runs entirely in polling mode under the device spinlock.
 * Returns 0 on success or a negative errno.
 */
static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	/* nothing to process: trivially done */
	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/* the SS is a single shared engine: serialize all register access */
	spin_lock_irqsave(&ss->slock, flags);

	/* program the key, one 32-bit word per SS_KEYx register */
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	/*
	 * load the IV (at most 4 words); areq->info was already checked
	 * above, so this test can never be false here
	 */
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	/* writing mode (with SS_ENABLED) to SS_CTL starts the operation */
	writel(mode, ss->base + SS_CTL);

	/* ATOMIC mappings are required: we hold a spinlock with IRQs off */
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	/* on this path byte counts become 32-bit word counts */
	ileft = areq->nbytes / 4;
	oleft = areq->nbytes / 4;
	oi = 0;
	oo = 0;
	do {
		/* push as many words as the RX FIFO and current SG allow */
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo > 0) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		/* current source SG fully consumed: advance to the next one */
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		/* re-read how much FIFO space/data is currently available */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		/* pop processed words from the TX FIFO into the dst SG */
		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo > 0) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		/* current destination SG full: advance to the next one */
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft > 0);

	/*
	 * read back the updated IV registers so the caller can chain the
	 * next CBC request; dead-but-harmless NULL test as above
	 */
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	/* disable the engine before releasing it for the next user */
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
124
/*
 * sun4i_ss_cipher_poll() - generic polling cipher path.
 *
 * Generic function that supports SGs whose size is not a multiple of 4.
 * If every SG length turns out to be 4-byte aligned it delegates to the
 * optimized sun4i_ss_opti_poll(); otherwise it linearizes partial words
 * through the stack buffers buf (input) and bufo (output) so the 32-bit
 * FIFOs can still be fed/drained in whole words.
 *
 * Returns 0 on success or a negative errno.
 */
static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
	char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo*/
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * if we have only SGs with size multiple of 4,
	 * we can use the SS optimized function
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length % 4) != 0)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length % 4) != 0)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	/* the SS is a single shared engine: serialize all register access */
	spin_lock_irqsave(&ss->slock, flags);

	/* program the key, one 32-bit word per SS_KEYx register */
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	/* load the IV (at most 4 words); areq->info was checked above */
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	/* writing mode (with SS_ENABLED) to SS_CTL starts the operation */
	writel(mode, ss->base + SS_CTL);

	/* ATOMIC mappings are required: we hold a spinlock with IRQs off */
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->nbytes;
	oleft = areq->nbytes;
	oi = 0;
	oo = 0;

	while (oleft > 0) {
		if (ileft > 0) {
			/*
			 * todo is the number of consecutive 4byte word that we
			 * can read from current SG
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo > 0 && ob == 0) {
				/* aligned case: feed the FIFO straight from the SG */
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * not enough consecutive bytes, so we need to
				 * linearize in buf. todo is in bytes
				 * After that copy, if we have a multiple of 4
				 * we need to be able to write all buf in one
				 * pass, so it is why we min() with rx_cnt
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				/* flush buf only once it holds whole words */
				if (ob % 4 == 0) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		/* re-read how much FIFO space/data is currently available */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->nbytes, rx_cnt,
			oo, mo.length, oleft, areq->nbytes, tx_cnt, ob);

		/* no output ready yet: keep feeding/polling */
		if (tx_cnt == 0)
			continue;
		/* todo in 4bytes word */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo > 0) {
			/* aligned case: drain the FIFO straight into the SG */
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * read obl bytes in bufo, we read at maximum for
			 * emptying the device
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * how many bytes we can copy ?
				 * no more than remaining SG size
				 * no more than remaining buffer
				 * no need to test against oleft
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
	/* read back the updated IV so CBC chaining can continue */
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	/* disable the engine before releasing it for the next user */
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
316
317 /* CBC AES */
318 int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
319 {
320 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
321 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
322 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
323
324 rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
325 op->keymode;
326 return sun4i_ss_cipher_poll(areq);
327 }
328
329 int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
330 {
331 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
332 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
333 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
334
335 rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
336 op->keymode;
337 return sun4i_ss_cipher_poll(areq);
338 }
339
340 /* ECB AES */
341 int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
342 {
343 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
344 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
345 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
346
347 rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
348 op->keymode;
349 return sun4i_ss_cipher_poll(areq);
350 }
351
352 int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
353 {
354 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
355 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
356 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
357
358 rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
359 op->keymode;
360 return sun4i_ss_cipher_poll(areq);
361 }
362
363 /* CBC DES */
364 int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
365 {
366 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
367 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
368 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
369
370 rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
371 op->keymode;
372 return sun4i_ss_cipher_poll(areq);
373 }
374
375 int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
376 {
377 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
378 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
379 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
380
381 rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
382 op->keymode;
383 return sun4i_ss_cipher_poll(areq);
384 }
385
386 /* ECB DES */
387 int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
388 {
389 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
390 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
391 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
392
393 rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
394 op->keymode;
395 return sun4i_ss_cipher_poll(areq);
396 }
397
398 int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
399 {
400 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
401 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
402 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
403
404 rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
405 op->keymode;
406 return sun4i_ss_cipher_poll(areq);
407 }
408
409 /* CBC 3DES */
410 int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
411 {
412 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
413 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
414 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
415
416 rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
417 op->keymode;
418 return sun4i_ss_cipher_poll(areq);
419 }
420
421 int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
422 {
423 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
424 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
425 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
426
427 rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
428 op->keymode;
429 return sun4i_ss_cipher_poll(areq);
430 }
431
432 /* ECB 3DES */
433 int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
434 {
435 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
436 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
437 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
438
439 rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
440 op->keymode;
441 return sun4i_ss_cipher_poll(areq);
442 }
443
444 int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
445 {
446 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
447 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
448 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
449
450 rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
451 op->keymode;
452 return sun4i_ss_cipher_poll(areq);
453 }
454
455 int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
456 {
457 struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
458 struct crypto_alg *alg = tfm->__crt_alg;
459 struct sun4i_ss_alg_template *algt;
460
461 memset(op, 0, sizeof(struct sun4i_tfm_ctx));
462
463 algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
464 op->ss = algt->ss;
465
466 tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);
467
468 return 0;
469 }
470
471 /* check and set the AES key, prepare the mode to be used */
472 int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
473 unsigned int keylen)
474 {
475 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
476 struct sun4i_ss_ctx *ss = op->ss;
477
478 switch (keylen) {
479 case 128 / 8:
480 op->keymode = SS_AES_128BITS;
481 break;
482 case 192 / 8:
483 op->keymode = SS_AES_192BITS;
484 break;
485 case 256 / 8:
486 op->keymode = SS_AES_256BITS;
487 break;
488 default:
489 dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
490 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
491 return -EINVAL;
492 }
493 op->keylen = keylen;
494 memcpy(op->key, key, keylen);
495 return 0;
496 }
497
498 /* check and set the DES key, prepare the mode to be used */
499 int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
500 unsigned int keylen)
501 {
502 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
503 struct sun4i_ss_ctx *ss = op->ss;
504 u32 flags;
505 u32 tmp[DES_EXPKEY_WORDS];
506 int ret;
507
508 if (unlikely(keylen != DES_KEY_SIZE)) {
509 dev_err(ss->dev, "Invalid keylen %u\n", keylen);
510 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
511 return -EINVAL;
512 }
513
514 flags = crypto_ablkcipher_get_flags(tfm);
515
516 ret = des_ekey(tmp, key);
517 if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
518 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
519 dev_dbg(ss->dev, "Weak key %u\n", keylen);
520 return -EINVAL;
521 }
522
523 op->keylen = keylen;
524 memcpy(op->key, key, keylen);
525 return 0;
526 }
527
528 /* check and set the 3DES key, prepare the mode to be used */
529 int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
530 unsigned int keylen)
531 {
532 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
533 struct sun4i_ss_ctx *ss = op->ss;
534
535 if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
536 dev_err(ss->dev, "Invalid keylen %u\n", keylen);
537 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
538 return -EINVAL;
539 }
540 op->keylen = keylen;
541 memcpy(op->key, key, keylen);
542 return 0;
543 }