/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
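
/*
 * For example, FLD_MASK(4, 3) evaluates to 0x18 (a mask covering bits 4:3)
 * and FLD_VAL(0x2, 4, 3) to 0x10; this is how the key-size field of
 * AES_REG_CTRL is built in omap_aes_write_ctrl() below.
 */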

#define AES_REG_KEY(x) (0x1C - ((x ^ 0x01) * 0x04))
#define AES_REG_IV(x) (0x20 + ((x) * 0x04))

#define AES_REG_CTRL 0x30
#define AES_REG_CTRL_CTR_WIDTH (1 << 7)
#define AES_REG_CTRL_CTR (1 << 6)
#define AES_REG_CTRL_CBC (1 << 5)
#define AES_REG_CTRL_KEY_SIZE (3 << 3)
#define AES_REG_CTRL_DIRECTION (1 << 2)
#define AES_REG_CTRL_INPUT_READY (1 << 1)
#define AES_REG_CTRL_OUTPUT_READY (1 << 0)

#define AES_REG_DATA 0x34
#define AES_REG_DATA_N(x) (0x34 + ((x) * 0x04))

#define AES_REG_REV 0x44
#define AES_REG_REV_MAJOR 0xF0
#define AES_REG_REV_MINOR 0x0F

#define AES_REG_MASK 0x48
#define AES_REG_MASK_SIDLE (1 << 6)
#define AES_REG_MASK_START (1 << 5)
#define AES_REG_MASK_DMA_OUT_EN (1 << 3)
#define AES_REG_MASK_DMA_IN_EN (1 << 2)
#define AES_REG_MASK_SOFTRESET (1 << 1)
#define AES_REG_AUTOIDLE (1 << 0)

#define AES_REG_SYSSTATUS 0x4C
#define AES_REG_SYSSTATUS_RESETDONE (1 << 0)

#define DEFAULT_TIMEOUT (5*HZ)

#define FLAGS_MODE_MASK 0x000f
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
#define FLAGS_GIV BIT(2)

#define FLAGS_INIT BIT(4)
#define FLAGS_FAST BIT(5)
#define FLAGS_BUSY BIT(6)

struct omap_aes_ctx {
        struct omap_aes_dev *dd;

        int keylen;
        u32 key[AES_KEYSIZE_256 / sizeof(u32)];
        unsigned long flags;
};

struct omap_aes_reqctx {
        unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH 1
#define OMAP_AES_CACHE_SIZE 0

struct omap_aes_dev {
        struct list_head list;
        unsigned long phys_base;
        void __iomem *io_base;
        struct omap_aes_ctx *ctx;
        struct device *dev;
        unsigned long flags;
        int err;

        spinlock_t lock;
        struct crypto_queue queue;

        struct tasklet_struct done_task;
        struct tasklet_struct queue_task;

        struct ablkcipher_request *req;
        size_t total;
        struct scatterlist *in_sg;
        struct scatterlist in_sgl;
        size_t in_offset;
        struct scatterlist *out_sg;
        struct scatterlist out_sgl;
        size_t out_offset;

        size_t buflen;
        void *buf_in;
        size_t dma_size;
        int dma_in;
        struct dma_chan *dma_lch_in;
        dma_addr_t dma_addr_in;
        void *buf_out;
        int dma_out;
        struct dma_chan *dma_lch_out;
        dma_addr_t dma_addr_out;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
        return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
                                  u32 value)
{
        __raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
                                       u32 value, u32 mask)
{
        u32 val;

        val = omap_aes_read(dd, offset);
        val &= ~mask;
        val |= value;
        omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
                             u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
        /*
         * Clocks are enabled when the request starts and disabled when it
         * finishes. There may be long delays between requests, and the
         * device might go to off mode to save power.
         */
        pm_runtime_get_sync(dd->dev);

        if (!(dd->flags & FLAGS_INIT)) {
                dd->flags |= FLAGS_INIT;
                dd->err = 0;
        }

        return 0;
}

static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
        unsigned int key32;
        int i, err;
        u32 val, mask;

        err = omap_aes_hw_init(dd);
        if (err)
                return err;

        val = 0;
        if (dd->dma_lch_out != NULL)
                val |= AES_REG_MASK_DMA_OUT_EN;
        if (dd->dma_lch_in != NULL)
                val |= AES_REG_MASK_DMA_IN_EN;

        mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;

        omap_aes_write_mask(dd, AES_REG_MASK, val, mask);

        key32 = dd->ctx->keylen / sizeof(u32);

        /* it seems a key should always be set even if it has not changed */
        for (i = 0; i < key32; i++) {
                omap_aes_write(dd, AES_REG_KEY(i),
                               __le32_to_cpu(dd->ctx->key[i]));
        }

        if ((dd->flags & FLAGS_CBC) && dd->req->info)
                omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);

        val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
        if (dd->flags & FLAGS_CBC)
                val |= AES_REG_CTRL_CBC;
        if (dd->flags & FLAGS_ENCRYPT)
                val |= AES_REG_CTRL_DIRECTION;

        mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
               AES_REG_CTRL_KEY_SIZE;

        omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);

        return 0;
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
        struct omap_aes_dev *dd = NULL, *tmp;

        spin_lock_bh(&list_lock);
        if (!ctx->dd) {
                list_for_each_entry(tmp, &dev_list, list) {
                        /* FIXME: take first available aes core */
                        dd = tmp;
                        break;
                }
                ctx->dd = dd;
        } else {
                /* already found before */
                dd = ctx->dd;
        }
        spin_unlock_bh(&list_lock);

        return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
        struct omap_aes_dev *dd = data;

        /* dma_lch_out - completed */
        tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
        int err = -ENOMEM;
        dma_cap_mask_t mask;

        dd->dma_lch_out = NULL;
        dd->dma_lch_in = NULL;

        dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
        dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
        dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
        dd->buflen &= ~(AES_BLOCK_SIZE - 1);

        if (!dd->buf_in || !dd->buf_out) {
                dev_err(dd->dev, "unable to alloc pages.\n");
                goto err_alloc;
        }

        /* MAP here */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
                                         DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
                dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_in;
        }

        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
                                          DMA_FROM_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
                dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_out;
        }

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        dd->dma_lch_in = dma_request_channel(mask, omap_dma_filter_fn,
                                             &dd->dma_in);
        if (!dd->dma_lch_in) {
                dev_err(dd->dev, "Unable to request in DMA channel\n");
                goto err_dma_in;
        }

        dd->dma_lch_out = dma_request_channel(mask, omap_dma_filter_fn,
                                              &dd->dma_out);
        if (!dd->dma_lch_out) {
                dev_err(dd->dev, "Unable to request out DMA channel\n");
                goto err_dma_out;
        }

        return 0;

err_dma_out:
        dma_release_channel(dd->dma_lch_in);
err_dma_in:
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
err_map_out:
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
        free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
        free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
        if (err)
                pr_err("error: %d\n", err);
        return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
        dma_release_channel(dd->dma_lch_out);
        dma_release_channel(dd->dma_lch_in);
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
        free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
        free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,
                        unsigned int start, unsigned int nbytes, int out)
{
        struct scatter_walk walk;

        if (!nbytes)
                return;

        scatterwalk_start(&walk, sg);
        scatterwalk_advance(&walk, start);
        scatterwalk_copychunks(buf, &walk, nbytes, out);
        scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
                   size_t buflen, size_t total, int out)
{
        unsigned int count, off = 0;

        while (buflen && total) {
                count = min((*sg)->length - *offset, total);
                count = min(count, buflen);

                if (!count)
                        return off;

                /*
                 * buflen and total are AES_BLOCK_SIZE size aligned,
                 * so count should be also aligned
                 */

                sg_copy_buf(buf + off, *sg, *offset, count, out);

                off += count;
                buflen -= count;
                *offset += count;
                total -= count;

                if (*offset == (*sg)->length) {
                        *sg = sg_next(*sg);
                        if (*sg)
                                *offset = 0;
                        else
                                total = 0;
                }
        }

        return off;
}

static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
                struct scatterlist *in_sg, struct scatterlist *out_sg)
{
        struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        struct omap_aes_dev *dd = ctx->dd;
        struct dma_async_tx_descriptor *tx_in, *tx_out;
        struct dma_slave_config cfg;
        dma_addr_t dma_addr_in = sg_dma_address(in_sg);
        int ret, length = sg_dma_len(in_sg);

        pr_debug("len: %d\n", length);

        dd->dma_size = length;

        if (!(dd->flags & FLAGS_FAST))
                dma_sync_single_for_device(dd->dev, dma_addr_in, length,
                                           DMA_TO_DEVICE);

        memset(&cfg, 0, sizeof(cfg));

        cfg.src_addr = dd->phys_base + AES_REG_DATA;
        cfg.dst_addr = dd->phys_base + AES_REG_DATA;
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.src_maxburst = DST_MAXBURST;
        cfg.dst_maxburst = DST_MAXBURST;

        /* IN */
        ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
                        ret);
                return ret;
        }

        tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
                                        DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_in) {
                dev_err(dd->dev, "IN prep_slave_sg() failed\n");
                return -EINVAL;
        }

        /* No callback necessary */
        tx_in->callback_param = dd;

        /* OUT */
        ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
                        ret);
                return ret;
        }

        tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_out) {
                dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
                return -EINVAL;
        }

        tx_out->callback = omap_aes_dma_out_callback;
        tx_out->callback_param = dd;

        dmaengine_submit(tx_in);
        dmaengine_submit(tx_out);

        dma_async_issue_pending(dd->dma_lch_in);
        dma_async_issue_pending(dd->dma_lch_out);

        /* start DMA or disable idle mode */
        omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
                            AES_REG_MASK_START);

        return 0;
}

static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
                                        crypto_ablkcipher_reqtfm(dd->req));
        int err, fast = 0, in, out;
        size_t count;
        dma_addr_t addr_in, addr_out;
        struct scatterlist *in_sg, *out_sg;
        int len32;

        pr_debug("total: %d\n", dd->total);

        if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
                /* check for alignment */
                in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
                out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

                fast = in && out;
        }

        if (fast) {
                count = min(dd->total, sg_dma_len(dd->in_sg));
                count = min(count, sg_dma_len(dd->out_sg));

                if (count != dd->total) {
                        pr_err("request length != buffer length\n");
                        return -EINVAL;
                }

                pr_debug("fast\n");

                err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }

                err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                        return -EINVAL;
                }

                addr_in = sg_dma_address(dd->in_sg);
                addr_out = sg_dma_address(dd->out_sg);

                in_sg = dd->in_sg;
                out_sg = dd->out_sg;

                dd->flags |= FLAGS_FAST;

        } else {
                /* use cache buffers */
                count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
                                dd->buflen, dd->total, 0);

                len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;

                /*
                 * The data going into the AES module has been copied
                 * to a local buffer and the data coming out will go
                 * into a local buffer so set up local SG entries for
                 * both.
                 */
                sg_init_table(&dd->in_sgl, 1);
                dd->in_sgl.offset = dd->in_offset;
                sg_dma_len(&dd->in_sgl) = len32;
                sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;

                sg_init_table(&dd->out_sgl, 1);
                dd->out_sgl.offset = dd->out_offset;
                sg_dma_len(&dd->out_sgl) = len32;
                sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;

                in_sg = &dd->in_sgl;
                out_sg = &dd->out_sgl;

                addr_in = dd->dma_addr_in;
                addr_out = dd->dma_addr_out;

                dd->flags &= ~FLAGS_FAST;

        }

        dd->total -= count;

        err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
        if (err) {
                dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
        }

        return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
        struct ablkcipher_request *req = dd->req;

        pr_debug("err: %d\n", err);

        pm_runtime_put_sync(dd->dev);
        dd->flags &= ~FLAGS_BUSY;

        req->base.complete(&req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
        int err = 0;
        size_t count;

        pr_debug("total: %d\n", dd->total);

        omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);

        dmaengine_terminate_all(dd->dma_lch_in);
        dmaengine_terminate_all(dd->dma_lch_out);

        if (dd->flags & FLAGS_FAST) {
                dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
                dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
        } else {
                dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
                                           dd->dma_size, DMA_FROM_DEVICE);

                /* copy data */
                count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
                                dd->buflen, dd->dma_size, 1);
                if (count != dd->dma_size) {
                        err = -EINVAL;
                        pr_err("not all data converted: %u\n", count);
                }
        }

        return err;
}

static int omap_aes_handle_queue(struct omap_aes_dev *dd,
                                 struct ablkcipher_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct omap_aes_ctx *ctx;
        struct omap_aes_reqctx *rctx;
        unsigned long flags;
        int err, ret = 0;

        spin_lock_irqsave(&dd->lock, flags);
        if (req)
                ret = ablkcipher_enqueue_request(&dd->queue, req);
        if (dd->flags & FLAGS_BUSY) {
                spin_unlock_irqrestore(&dd->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req)
                dd->flags |= FLAGS_BUSY;
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ablkcipher_request_cast(async_req);

        /* assign new request to device */
        dd->req = req;
        dd->total = req->nbytes;
        dd->in_offset = 0;
        dd->in_sg = req->src;
        dd->out_offset = 0;
        dd->out_sg = req->dst;

        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        dd->ctx = ctx;
        ctx->dd = dd;

        err = omap_aes_write_ctrl(dd);
        if (!err)
                err = omap_aes_crypt_dma_start(dd);
        if (err) {
                /* aes_task will not finish it, so do it here */
                omap_aes_finish_req(dd, err);
                tasklet_schedule(&dd->queue_task);
        }

        return ret; /* return ret, which is enqueue return value */
}

static void omap_aes_done_task(unsigned long data)
{
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
        int err;

        pr_debug("enter\n");

        err = omap_aes_crypt_dma_stop(dd);

        err = dd->err ? : err;

        if (dd->total && !err) {
                err = omap_aes_crypt_dma_start(dd);
                if (!err)
                        return; /* DMA started. Not finishing. */
        }

        omap_aes_finish_req(dd, err);
        omap_aes_handle_queue(dd, NULL);

        pr_debug("exit\n");
}

static void omap_aes_queue_task(unsigned long data)
{
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

        omap_aes_handle_queue(dd, NULL);
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
                        crypto_ablkcipher_reqtfm(req));
        struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct omap_aes_dev *dd;

        pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
                 !!(mode & FLAGS_ENCRYPT),
                 !!(mode & FLAGS_CBC));

        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
                pr_err("request size is not exact amount of AES blocks\n");
                return -EINVAL;
        }

        dd = omap_aes_find_dev(ctx);
        if (!dd)
                return -ENODEV;

        rctx->mode = mode;

        return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256)
                return -EINVAL;

        pr_debug("enter, keylen: %d\n", keylen);

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
        pr_debug("enter\n");

        tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

        return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
        pr_debug("enter\n");
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs[] = {
{
        .cra_name = "ecb(aes)",
        .cra_driver_name = "ecb-aes-omap",
        .cra_priority = 100,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                     CRYPTO_ALG_KERN_DRIVER_ONLY |
                     CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct omap_aes_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = omap_aes_cra_init,
        .cra_exit = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .setkey = omap_aes_setkey,
                .encrypt = omap_aes_ecb_encrypt,
                .decrypt = omap_aes_ecb_decrypt,
        }
},
{
        .cra_name = "cbc(aes)",
        .cra_driver_name = "cbc-aes-omap",
        .cra_priority = 100,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                     CRYPTO_ALG_KERN_DRIVER_ONLY |
                     CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct omap_aes_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = omap_aes_cra_init,
        .cra_exit = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .setkey = omap_aes_setkey,
                .encrypt = omap_aes_cbc_encrypt,
                .decrypt = omap_aes_cbc_decrypt,
        }
}
};

static int omap_aes_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct omap_aes_dev *dd;
        struct resource *res;
        int err = -ENOMEM, i, j;
        u32 reg;

        dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
        if (dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                goto err_data;
        }
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);

        spin_lock_init(&dd->lock);
        crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

        /* Get the base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "invalid resource type\n");
                err = -ENODEV;
                goto err_res;
        }
        dd->phys_base = res->start;

        /* Get the DMA */
        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (!res)
                dev_info(dev, "no DMA info\n");
        else
                dd->dma_out = res->start;

        /* Get the DMA */
        res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
        if (!res)
                dev_info(dev, "no DMA info\n");
        else
                dd->dma_in = res->start;

        dd->io_base = ioremap(dd->phys_base, SZ_4K);
        if (!dd->io_base) {
                dev_err(dev, "can't ioremap\n");
                err = -ENOMEM;
                goto err_res;
        }

        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);

        reg = omap_aes_read(dd, AES_REG_REV);
        dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
                 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);

        pm_runtime_put_sync(dev);

        tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
        tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

        err = omap_aes_dma_init(dd);
        if (err)
                goto err_dma;

        INIT_LIST_HEAD(&dd->list);
        spin_lock(&list_lock);
        list_add_tail(&dd->list, &dev_list);
        spin_unlock(&list_lock);

        for (i = 0; i < ARRAY_SIZE(algs); i++) {
                pr_debug("i: %d\n", i);
                err = crypto_register_alg(&algs[i]);
                if (err)
                        goto err_algs;
        }

        return 0;
err_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&algs[j]);
        omap_aes_dma_cleanup(dd);
err_dma:
        tasklet_kill(&dd->done_task);
        tasklet_kill(&dd->queue_task);
        iounmap(dd->io_base);
        pm_runtime_disable(dev);
err_res:
        kfree(dd);
        dd = NULL;
err_data:
        dev_err(dev, "initialization failed.\n");
        return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
        struct omap_aes_dev *dd = platform_get_drvdata(pdev);
        int i;

        if (!dd)
                return -ENODEV;

        spin_lock(&list_lock);
        list_del(&dd->list);
        spin_unlock(&list_lock);

        for (i = 0; i < ARRAY_SIZE(algs); i++)
                crypto_unregister_alg(&algs[i]);

        tasklet_kill(&dd->done_task);
        tasklet_kill(&dd->queue_task);
        omap_aes_dma_cleanup(dd);
        iounmap(dd->io_base);
        pm_runtime_disable(dd->dev);
        kfree(dd);
        dd = NULL;

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
        pm_runtime_put_sync(dev);
        return 0;
}

static int omap_aes_resume(struct device *dev)
{
        pm_runtime_get_sync(dev);
        return 0;
}
#endif

static const struct dev_pm_ops omap_aes_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
};

static struct platform_driver omap_aes_driver = {
        .probe = omap_aes_probe,
        .remove = omap_aes_remove,
        .driver = {
                .name = "omap-aes",
                .owner = THIS_MODULE,
                .pm = &omap_aes_pm_ops,
        },
};

static int __init omap_aes_mod_init(void)
{
        return platform_driver_register(&omap_aes_driver);
}

static void __exit omap_aes_mod_exit(void)
{
        platform_driver_unregister(&omap_aes_driver);
}

module_init(omap_aes_mod_init);
module_exit(omap_aes_mod_exit);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
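
/*
 * Illustrative sketch only (not part of the original driver, guarded out
 * with #if 0): how a kernel-side caller of this era's ablkcipher API could
 * exercise the "cbc(aes)" algorithm registered above. The crypto core may
 * or may not select cbc-aes-omap, depending on registered priorities. All
 * identifiers below (omap_aes_example*, etc.) are hypothetical.
 */
#if 0
#include <linux/completion.h>

struct omap_aes_example_result {
        struct completion completion;
        int err;
};

static void omap_aes_example_done(struct crypto_async_request *req, int err)
{
        struct omap_aes_example_result *res = req->data;

        if (err == -EINPROGRESS)
                return;         /* backlogged request, wait for the final call */
        res->err = err;
        complete(&res->completion);
}

static int omap_aes_example(void)
{
        struct crypto_ablkcipher *tfm;
        struct ablkcipher_request *req;
        struct scatterlist sg;
        struct omap_aes_example_result result;
        static u8 key[AES_KEYSIZE_128];
        static u8 iv[AES_BLOCK_SIZE];
        static u8 buf[AES_BLOCK_SIZE];
        int err;

        /* Ask the crypto core for the generic name registered above. */
        tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
        if (err)
                goto out_free_tfm;

        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        init_completion(&result.completion);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        omap_aes_example_done, &result);

        /* One block, transformed in place; nbytes must be block aligned. */
        sg_init_one(&sg, buf, sizeof(buf));
        ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

        err = crypto_ablkcipher_encrypt(req);
        if (err == -EINPROGRESS || err == -EBUSY) {
                wait_for_completion(&result.completion);
                err = result.err;
        }

        ablkcipher_request_free(req);
out_free_tfm:
        crypto_free_ablkcipher(tfm);
        return err;
}
#endif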