drivers/crypto/atmel-aes.c

/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"

#define ATMEL_AES_PRIORITY	300

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

/* AES flags */
#define AES_FLAGS_MODE_MASK	0x03ff
#define AES_FLAGS_ENCRYPT	BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CFB		BIT(2)
#define AES_FLAGS_CFB8		BIT(3)
#define AES_FLAGS_CFB16		BIT(4)
#define AES_FLAGS_CFB32		BIT(5)
#define AES_FLAGS_CFB64		BIT(6)
#define AES_FLAGS_CFB128	BIT(7)
#define AES_FLAGS_OFB		BIT(8)
#define AES_FLAGS_CTR		BIT(9)

#define AES_FLAGS_INIT		BIT(16)
#define AES_FLAGS_DMA		BIT(17)
#define AES_FLAGS_BUSY		BIT(18)
#define AES_FLAGS_FAST		BIT(19)

#define ATMEL_AES_QUEUE_LENGTH	50

#define ATMEL_AES_DMA_THRESHOLD	16

struct atmel_aes_caps {
	bool	has_dualbuff;
	bool	has_cfb64;
	u32	max_burst_size;
};

struct atmel_aes_dev;

typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);

struct atmel_aes_base_ctx {
	struct atmel_aes_dev	*dd;
	atmel_aes_fn_t		start;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];

	u16		block_size;
};

struct atmel_aes_ctx {
	struct atmel_aes_base_ctx	base;
};

struct atmel_aes_reqctx {
	unsigned long	mode;
};

struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct dma_slave_config	dma_conf;
};

struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct crypto_async_request	*areq;
	struct atmel_aes_base_ctx	*ctx;

	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	size_t			total;

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t			in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t			out_offset;

	size_t			bufcnt;
	size_t			buflen;
	size_t			dma_size;

	void			*buf_in;
	int			dma_in;
	dma_addr_t		dma_addr_in;
	struct atmel_aes_dma	dma_lch_in;

	void			*buf_out;
	int			dma_out;
	dma_addr_t		dma_addr_out;
	struct atmel_aes_dma	dma_lch_out;

	struct atmel_aes_caps	caps;

	u32			hw_version;
};

struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

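/*
 * Count how many scatterlist entries req->nbytes spans, stopping early if
 * the list runs out before the full length is covered.
 */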
static int atmel_aes_sg_length(struct ablkcipher_request *req,
			struct scatterlist *sg)
{
	unsigned int total = req->nbytes;
	int sg_nb;
	unsigned int len;
	struct scatterlist *sg_list;

	sg_nb = 0;
	sg_list = sg;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}

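/*
 * Copy up to min(buflen, total) bytes between a linear buffer and the
 * scatterlist, advancing *sg and *offset so the caller can resume from
 * where the previous call stopped; 'out' selects the copy direction.
 */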
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	size_t count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
			      const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

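/*
 * Bind a tfm context to an AES device: pick the first device on the global
 * list on first use and keep reusing it for subsequent requests.
 */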
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}

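/*
 * Enable the peripheral clock and, if the device has not been initialized
 * since it last went idle, soft-reset the IP and program the CKEY field of
 * the Mode Register.
 */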
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	int err;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
	return 0;
}

static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);

	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

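/*
 * Submit one chunk on both DMA channels: choose bus width and burst size to
 * match the current CFB segment size, then queue a MEM_TO_DEV descriptor
 * for the input FIFO and a DEV_TO_MEM descriptor for the output FIFO.  The
 * output channel's completion callback schedules the done tasklet.
 */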
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;

	dd->dma_size = length;

	dma_sync_single_for_device(dd->dev, dma_addr_in, length,
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(dd->dev, dma_addr_out, length,
				   DMA_FROM_DEVICE);

	if (dd->flags & AES_FLAGS_CFB8) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else if (dd->flags & AES_FLAGS_CFB16) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
		dd->dma_lch_in.dma_conf.src_maxburst = 1;
		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
		dd->dma_lch_out.dma_conf.src_maxburst = 1;
		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	} else {
		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	}

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= AES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_aes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}

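/*
 * PIO fallback for small requests: bounce the input scatterlist into
 * buf_in, enable the data-ready interrupt and write the words straight
 * into the input data registers.
 */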
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);

	dd->flags &= ~AES_FLAGS_DMA;

	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
				dd->dma_size, DMA_TO_DEVICE);
	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

	/* use cache buffers */
	dd->nb_in_sg = atmel_aes_sg_length(req, dd->in_sg);
	if (!dd->nb_in_sg)
		return -EINVAL;

	dd->nb_out_sg = atmel_aes_sg_length(req, dd->out_sg);
	if (!dd->nb_out_sg)
		return -EINVAL;

	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
					dd->buf_in, dd->total);

	if (!dd->bufcnt)
		return -EINVAL;

	dd->total -= dd->bufcnt;

	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
				dd->bufcnt >> 2);

	return 0;
}

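/*
 * Start one DMA chunk.  When both scatterlists are word-aligned, a whole
 * number of blocks long and of matching length, map them directly (the
 * "fast" path); otherwise stage the data through the pre-mapped bounce
 * buffers.
 */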
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}

	if (fast) {
		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
		count = min_t(size_t, count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1,
				DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1,
				DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= AES_FLAGS_FAST;

	} else {
		dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
					dd->dma_size, DMA_TO_DEVICE);

		/* use cache buffers */
		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
				dd->buf_in, dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~AES_FLAGS_FAST;
	}

	dd->total -= count;

	err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);

	if (err && (dd->flags & AES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		/* out_sg was mapped FROM_DEVICE, so unmap it the same way */
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

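/*
 * Program the Mode Register (key size, operating mode, CFB segment size,
 * cipher direction and start mode), then load the key and, for the
 * chaining modes, the IV.
 */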
static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
				 const u32 *iv)
{
	u32 valmr = 0;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	if (dd->flags & AES_FLAGS_CBC) {
		valmr |= AES_MR_OPMOD_CBC;
	} else if (dd->flags & AES_FLAGS_CFB) {
		valmr |= AES_MR_OPMOD_CFB;
		if (dd->flags & AES_FLAGS_CFB8)
			valmr |= AES_MR_CFBS_8b;
		else if (dd->flags & AES_FLAGS_CFB16)
			valmr |= AES_MR_CFBS_16b;
		else if (dd->flags & AES_FLAGS_CFB32)
			valmr |= AES_MR_CFBS_32b;
		else if (dd->flags & AES_FLAGS_CFB64)
			valmr |= AES_MR_CFBS_64b;
		else if (dd->flags & AES_FLAGS_CFB128)
			valmr |= AES_MR_CFBS_128b;
	} else if (dd->flags & AES_FLAGS_OFB) {
		valmr |= AES_MR_OPMOD_OFB;
	} else if (dd->flags & AES_FLAGS_CTR) {
		valmr |= AES_MR_OPMOD_CTR;
	} else {
		valmr |= AES_MR_OPMOD_ECB;
	}

	if (dd->flags & AES_FLAGS_ENCRYPT)
		valmr |= AES_MR_CYPHER_ENC;

	if (use_dma) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
			  dd->ctx->keylen >> 2);

	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
	     (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
	    iv) {
		atmel_aes_write_n(dd, AES_IVR(0), iv, 4);
	}
}

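/*
 * Enqueue a new request (if any) and, unless the device is busy, dequeue
 * the next one and hand it to its ->start() callback.  The return value is
 * the enqueue status for the caller's request, or the start result when
 * the dequeued request is the one just submitted.
 */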
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
				  struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq, *backlog;
	struct atmel_aes_base_ctx *ctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	areq = crypto_dequeue_request(&dd->queue);
	if (areq)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	dd->areq = areq;
	dd->ctx = ctx;

	err = ctx->start(dd);
	return (areq != new_areq) ? ret : err;
}

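/*
 * ->start() callback for ablkcipher requests: latch the request parameters
 * and mode flags into the device, initialize the hardware and kick off
 * either the DMA or the PIO path depending on the request size.
 */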
static int atmel_aes_start(struct atmel_aes_dev *dd)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx;
	bool use_dma;
	int err;

	/* assign new request to device */
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	rctx->mode &= AES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;

	err = atmel_aes_hw_init(dd);
	if (!err) {
		use_dma = (dd->total > ATMEL_AES_DMA_THRESHOLD);
		atmel_aes_write_ctrl(dd, use_dma, req->info);
		if (use_dma)
			err = atmel_aes_crypt_dma_start(dd);
		else
			err = atmel_aes_crypt_cpu_start(dd);
	}
	if (err) {
		/* aes_task will not finish it, so do it here */
		atmel_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return -EINPROGRESS;
}

static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & AES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & AES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
						dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				pr_err("not all data converted: %zu\n", count);
			}
		}
	}

	return err;
}

static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}

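/*
 * Common entry point for all modes: reject requests that are not a whole
 * number of blocks for the selected mode, record the mode in the request
 * context and queue the request on a device.
 */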
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct atmel_aes_dev *dd;

	if (mode & AES_FLAGS_CFB8) {
		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB16) {
		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB32) {
		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB64) {
		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB64 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB64_BLOCK_SIZE;
	} else {
		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of AES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = AES_BLOCK_SIZE;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, &req->base);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave	*sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

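/*
 * Acquire the two DMA channels, either by name from the device tree or
 * through the legacy platform-data filter, and preconfigure both slave
 * configurations for 4-byte accesses to the data FIFOs.
 */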
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
	struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan)
		goto err_dma_in;

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		AES_IDATAR(0);
	dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
	if (!dd->dma_lch_out.chan)
		goto err_dma_out;

	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		AES_ODATAR(0);
	dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}

static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		0);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
	ctx->base.start = atmel_aes_start;

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

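/*
 * Algorithms exposed unconditionally.  cfb64(aes) is registered separately
 * below because only IP revisions with the has_cfb64 capability support a
 * 64-bit CFB segment size.
 */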
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x3,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x1,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};

static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};

static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

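/*
 * Completion tasklet: for PIO requests, drain the output data registers;
 * for DMA requests, tear down the finished chunk and either start the next
 * one or complete the request and restart the queue.
 */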
static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
	int err;

	if (!(dd->flags & AES_FLAGS_DMA)) {
		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
				dd->bufcnt >> 2);

		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
			dd->buf_out, dd->bufcnt))
			err = 0;
		else
			err = -EINVAL;

		goto cpu_end;
	}

	err = atmel_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		if (dd->flags & AES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

cpu_end:
	atmel_aes_finish_req(dd, err);
	atmel_aes_handle_queue(dd, NULL);
}

static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	return 0;

err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

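/*
 * Derive the capabilities from the major IP revision: versions 0x130,
 * 0x200 and 0x500 support dual-buffer mode, 64-bit CFB and 4-beat bursts,
 * while 0x120 and unknown revisions fall back to the minimum feature set.
 */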
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x500:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x200:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
			"Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif

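/*
 * Probe: gather platform data (building it from the device tree when
 * needed), map the MMIO window, hook up the IRQ and clock, read out the
 * hardware version, then set up bounce buffers and DMA channels before
 * registering the algorithms.
 */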
static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
					(unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
					(unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->irq = -1;

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
			       IRQF_SHARED, "atmel-aes", aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto res_err;
	}

	/* devm_ioremap_resource() returns an ERR_PTR, never NULL */
	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
	if (IS_ERR(aes_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(aes_dd->io_base);
		goto res_err;
	}

	err = atmel_aes_hw_version_init(aes_dd);
	if (err)
		goto res_err;

	atmel_aes_get_cap(aes_dd);

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
			dma_chan_name(aes_dd->dma_lch_in.chan),
			dma_chan_name(aes_dd->dma_lch_out.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);

	return 0;
}

static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");