1 /*
2 * Renesas SuperH DMA Engine support
3 *
4 * based on drivers/dma/fsldma.c
5 *
6 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
7 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
8 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
9 *
10 * This is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * - The SuperH DMAC has no hardware DMA chain mode.
16 * - The maximum DMA transfer size is 16 MB.
17 *
18 */
19
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/dmaengine.h>
25 #include <linux/delay.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29
30 #include <asm/dmaengine.h>
31
32 #include "shdma.h"
33
34 /* DMA descriptor control */
35 enum sh_dmae_desc_status {
36 DESC_IDLE,
37 DESC_PREPARED,
38 DESC_SUBMITTED,
39 DESC_COMPLETED, /* completed, have to call callback */
40 DESC_WAITING, /* callback called, waiting for ack / re-submit */
41 };
42
43 #define NR_DESCS_PER_CHANNEL 32
44 /* Default MEMCPY transfer size = 2^2 = 4 bytes */
45 #define LOG2_DEFAULT_XFER_SIZE 2
46
47 /* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
48 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
49
50 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
51
52 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
53 {
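	/* base points at u32-wide registers, so convert the byte offset to a word index */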
54 __raw_writel(data, sh_dc->base + reg / sizeof(u32));
55 }
56
57 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
58 {
59 return __raw_readl(sh_dc->base + reg / sizeof(u32));
60 }
61
62 static u16 dmaor_read(struct sh_dmae_device *shdev)
63 {
64 return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
65 }
66
67 static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
68 {
69 __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
70 }
71
72 /*
73 * Reset DMA controller
74 *
75 * SH7780 has two DMAOR registers
76 */
77 static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
78 {
79 unsigned short dmaor = dmaor_read(shdev);
80
81 dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
82 }
83
84 static int sh_dmae_rst(struct sh_dmae_device *shdev)
85 {
86 unsigned short dmaor;
87
88 sh_dmae_ctl_stop(shdev);
89 dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;
90
91 dmaor_write(shdev, dmaor);
92 if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
93 pr_warning("dma-sh: Can't initialize DMAOR.\n");
94 return -EINVAL;
95 }
96 return 0;
97 }
98
99 static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
100 {
101 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
102
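	/* Busy means the channel is enabled (DE set) but has not yet signalled transfer end (TE clear) */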
103 if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
104 return true; /* working */
105
106 return false; /* waiting */
107 }
108
109 static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
110 {
111 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
112 struct sh_dmae_device, common);
113 struct sh_dmae_pdata *pdata = shdev->pdata;
114 int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
115 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
116
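	/* Out-of-range TS field values fall back to entry 0 of the platform shift table */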
117 if (cnt >= pdata->ts_shift_num)
118 cnt = 0;
119
120 return pdata->ts_shift[cnt];
121 }
122
123 static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
124 {
125 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
126 struct sh_dmae_device, common);
127 struct sh_dmae_pdata *pdata = shdev->pdata;
128 int i;
129
130 for (i = 0; i < pdata->ts_shift_num; i++)
131 if (pdata->ts_shift[i] == l2size)
132 break;
133
134 if (i == pdata->ts_shift_num)
135 i = 0;
136
137 return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
138 ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
139 }
140
141 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
142 {
143 sh_dmae_writel(sh_chan, hw->sar, SAR);
144 sh_dmae_writel(sh_chan, hw->dar, DAR);
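	/* TCR counts transfer units rather than bytes, hence the shift by the unit size */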
145 sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
146 }
147
148 static void dmae_start(struct sh_dmae_chan *sh_chan)
149 {
150 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
151
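	/* Enable the channel and its interrupt, clearing any stale transfer-end flag */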
152 chcr |= CHCR_DE | CHCR_IE;
153 sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
154 }
155
156 static void dmae_halt(struct sh_dmae_chan *sh_chan)
157 {
158 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
159
160 chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
161 sh_dmae_writel(sh_chan, chcr, CHCR);
162 }
163
164 static void dmae_init(struct sh_dmae_chan *sh_chan)
165 {
166 /*
167 * Default configuration for dual address memory-memory transfer.
168 * 0x400 represents auto-request.
169 */
170 u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
171 LOG2_DEFAULT_XFER_SIZE);
172 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
173 sh_dmae_writel(sh_chan, chcr, CHCR);
174 }
175
176 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
177 {
178 /* CHCR must not be written while the DMA channel is busy */
179 if (dmae_is_busy(sh_chan))
180 return -EBUSY;
181
182 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
183 sh_dmae_writel(sh_chan, val, CHCR);
184
185 return 0;
186 }
187
188 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
189 {
190 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
191 struct sh_dmae_device, common);
192 struct sh_dmae_pdata *pdata = shdev->pdata;
193 struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
194 u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
195 int shift = chan_pdata->dmars_bit;
196
197 if (dmae_is_busy(sh_chan))
198 return -EBUSY;
199
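	/* Preserve the other MID/RID field sharing this 16-bit DMARS register and set ours */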
200 __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
201 addr);
202
203 return 0;
204 }
205
206 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
207 {
208 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
209 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
210 dma_async_tx_callback callback = tx->callback;
211 dma_cookie_t cookie;
212
213 spin_lock_bh(&sh_chan->desc_lock);
214
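	/* Allocate the next cookie; on signed overflow wrap back to 1 so cookies stay positive */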
215 cookie = sh_chan->common.cookie;
216 cookie++;
217 if (cookie < 0)
218 cookie = 1;
219
220 sh_chan->common.cookie = cookie;
221 tx->cookie = cookie;
222
223 /* Mark all chunks of this descriptor as submitted, move to the queue */
224 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
225 /*
226 * All chunks are on the global ld_free, so we have to find
227 * the end of the chain ourselves
228 */
229 if (chunk != desc && (chunk->mark == DESC_IDLE ||
230 chunk->async_tx.cookie > 0 ||
231 chunk->async_tx.cookie == -EBUSY ||
232 &chunk->node == &sh_chan->ld_free))
233 break;
234 chunk->mark = DESC_SUBMITTED;
235 /* Callback goes to the last chunk */
236 chunk->async_tx.callback = NULL;
237 chunk->cookie = cookie;
238 list_move_tail(&chunk->node, &sh_chan->ld_queue);
239 last = chunk;
240 }
241
242 last->async_tx.callback = callback;
243 last->async_tx.callback_param = tx->callback_param;
244
245 dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
246 tx->cookie, &last->async_tx, sh_chan->id,
247 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
248
249 spin_unlock_bh(&sh_chan->desc_lock);
250
251 return cookie;
252 }
253
254 /* Called with desc_lock held */
255 static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
256 {
257 struct sh_desc *desc;
258
259 list_for_each_entry(desc, &sh_chan->ld_free, node)
260 if (desc->mark != DESC_PREPARED) {
261 BUG_ON(desc->mark != DESC_IDLE);
262 list_del(&desc->node);
263 return desc;
264 }
265
266 return NULL;
267 }
268
269 static struct sh_dmae_slave_config *sh_dmae_find_slave(
270 struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
271 {
272 struct dma_device *dma_dev = sh_chan->common.device;
273 struct sh_dmae_device *shdev = container_of(dma_dev,
274 struct sh_dmae_device, common);
275 struct sh_dmae_pdata *pdata = shdev->pdata;
276 int i;
277
278 if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
279 return NULL;
280
281 for (i = 0; i < pdata->slave_num; i++)
282 if (pdata->slave[i].slave_id == slave_id)
283 return pdata->slave + i;
284
285 return NULL;
286 }
287
288 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
289 {
290 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
291 struct sh_desc *desc;
292 struct sh_dmae_slave *param = chan->private;
293
294 pm_runtime_get_sync(sh_chan->dev);
295
296 /*
297 * This relies on the guarantee from dmaengine that alloc_chan_resources
298 * never runs concurrently with itself or free_chan_resources.
299 */
300 if (param) {
301 struct sh_dmae_slave_config *cfg;
302
303 cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
304 if (!cfg)
305 return -EINVAL;
306
307 if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
308 return -EBUSY;
309
310 param->config = cfg;
311
312 dmae_set_dmars(sh_chan, cfg->mid_rid);
313 dmae_set_chcr(sh_chan, cfg->chcr);
314 } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
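		/* The channel is not set to auto-request (the MEMCPY default), reinitialise it */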
315 dmae_init(sh_chan);
316 }
317
318 spin_lock_bh(&sh_chan->desc_lock);
319 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
320 spin_unlock_bh(&sh_chan->desc_lock);
321 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
322 if (!desc) {
323 spin_lock_bh(&sh_chan->desc_lock);
324 break;
325 }
326 dma_async_tx_descriptor_init(&desc->async_tx,
327 &sh_chan->common);
328 desc->async_tx.tx_submit = sh_dmae_tx_submit;
329 desc->mark = DESC_IDLE;
330
331 spin_lock_bh(&sh_chan->desc_lock);
332 list_add(&desc->node, &sh_chan->ld_free);
333 sh_chan->descs_allocated++;
334 }
335 spin_unlock_bh(&sh_chan->desc_lock);
336
337 if (!sh_chan->descs_allocated)
338 pm_runtime_put(sh_chan->dev);
339
340 return sh_chan->descs_allocated;
341 }
342
343 /*
344 * sh_dma_free_chan_resources - Free all resources of the channel.
345 */
346 static void sh_dmae_free_chan_resources(struct dma_chan *chan)
347 {
348 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
349 struct sh_desc *desc, *_desc;
350 LIST_HEAD(list);
351 int descs = sh_chan->descs_allocated;
352
353 dmae_halt(sh_chan);
354
355 /* Prepared and not submitted descriptors can still be on the queue */
356 if (!list_empty(&sh_chan->ld_queue))
357 sh_dmae_chan_ld_cleanup(sh_chan, true);
358
359 if (chan->private) {
360 /* The caller is holding dma_list_mutex */
361 struct sh_dmae_slave *param = chan->private;
362 clear_bit(param->slave_id, sh_dmae_slave_used);
363 }
364
365 spin_lock_bh(&sh_chan->desc_lock);
366
367 list_splice_init(&sh_chan->ld_free, &list);
368 sh_chan->descs_allocated = 0;
369
370 spin_unlock_bh(&sh_chan->desc_lock);
371
372 if (descs > 0)
373 pm_runtime_put(sh_chan->dev);
374
375 list_for_each_entry_safe(desc, _desc, &list, node)
376 kfree(desc);
377 }
378
379 /**
380 * sh_dmae_add_desc - get, set up and return one transfer descriptor
381 * @sh_chan: DMA channel
382 * @flags: DMA transfer flags
383 * @dest: destination DMA address, incremented when direction equals
384 * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
385 * @src: source DMA address, incremented when direction equals
386 * DMA_TO_DEVICE or DMA_BIDIRECTIONAL
387 * @len: DMA transfer length
388 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
389 * @direction: needed for slave DMA to decide which address to keep constant,
390 * equals DMA_BIDIRECTIONAL for MEMCPY
391 * Returns a pointer to the prepared descriptor or NULL on error
392 * Locks: called with desc_lock held
393 */
394 static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
395 unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
396 struct sh_desc **first, enum dma_data_direction direction)
397 {
398 struct sh_desc *new;
399 size_t copy_size;
400
401 if (!*len)
402 return NULL;
403
404 /* Allocate the link descriptor from the free list */
405 new = sh_dmae_get_desc(sh_chan);
406 if (!new) {
407 dev_err(sh_chan->dev, "No free link descriptor available\n");
408 return NULL;
409 }
410
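	/* Limit each chunk to SH_DMA_TCR_MAX + 1 bytes per hardware descriptor */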
411 copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
412
413 new->hw.sar = *src;
414 new->hw.dar = *dest;
415 new->hw.tcr = copy_size;
416
417 if (!*first) {
418 /* First desc */
419 new->async_tx.cookie = -EBUSY;
420 *first = new;
421 } else {
422 /* Other desc - invisible to the user */
423 new->async_tx.cookie = -EINVAL;
424 }
425
426 dev_dbg(sh_chan->dev,
427 "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
428 copy_size, *len, *src, *dest, &new->async_tx,
429 new->async_tx.cookie, sh_chan->xmit_shift);
430
431 new->mark = DESC_PREPARED;
432 new->async_tx.flags = flags;
433 new->direction = direction;
434
435 *len -= copy_size;
436 if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
437 *src += copy_size;
438 if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
439 *dest += copy_size;
440
441 return new;
442 }
443
444 /*
445 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
446 *
447 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
448 * converted to scatter-gather to guarantee consistent locking and a correct
449 * list manipulation. For slave DMA, direction carries the usual meaning, and,
450 * logically, the SG list is RAM and the addr variable contains the slave
451 * address, e.g., a FIFO I/O register. For MEMCPY, direction equals
452 * DMA_BIDIRECTIONAL, and the single-element SG list points at the source buffer.
453 */
454 static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
455 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
456 enum dma_data_direction direction, unsigned long flags)
457 {
458 struct scatterlist *sg;
459 struct sh_desc *first = NULL, *new = NULL /* compiler... */;
460 LIST_HEAD(tx_list);
461 int chunks = 0;
462 int i;
463
464 if (!sg_len)
465 return NULL;
466
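	/* Count the chunks needed: each SG entry is split into pieces of at most SH_DMA_TCR_MAX + 1 bytes */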
467 for_each_sg(sgl, sg, sg_len, i)
468 chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
469 (SH_DMA_TCR_MAX + 1);
470
471 /* Have to lock the whole loop to protect against concurrent release */
472 spin_lock_bh(&sh_chan->desc_lock);
473
474 /*
475 * Chaining:
476 * the first descriptor is the one the user deals with in all API calls; its
477 * cookie is initially set to -EBUSY and is replaced with a positive number
478 * at tx_submit;
479 * if more than one chunk is needed, further chunks have cookie = -EINVAL;
480 * the last chunk, if not equal to the first, has cookie = -ENOSPC;
481 * all chunks are linked onto the tx_list head with their .node heads
482 * only during this function, then they are immediately spliced
483 * back onto the free list as a chain
484 */
485 for_each_sg(sgl, sg, sg_len, i) {
486 dma_addr_t sg_addr = sg_dma_address(sg);
487 size_t len = sg_dma_len(sg);
488
489 if (!len)
490 goto err_get_desc;
491
492 do {
493 dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
494 i, sg, len, (unsigned long long)sg_addr);
495
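			/* For device-to-memory transfers the SG entry is the destination, otherwise it is the source */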
496 if (direction == DMA_FROM_DEVICE)
497 new = sh_dmae_add_desc(sh_chan, flags,
498 &sg_addr, addr, &len, &first,
499 direction);
500 else
501 new = sh_dmae_add_desc(sh_chan, flags,
502 addr, &sg_addr, &len, &first,
503 direction);
504 if (!new)
505 goto err_get_desc;
506
507 new->chunks = chunks--;
508 list_add_tail(&new->node, &tx_list);
509 } while (len);
510 }
511
512 if (new != first)
513 new->async_tx.cookie = -ENOSPC;
514
515 /* Put them back on the free list so they don't get lost */
516 list_splice_tail(&tx_list, &sh_chan->ld_free);
517
518 spin_unlock_bh(&sh_chan->desc_lock);
519
520 return &first->async_tx;
521
522 err_get_desc:
523 list_for_each_entry(new, &tx_list, node)
524 new->mark = DESC_IDLE;
525 list_splice(&tx_list, &sh_chan->ld_free);
526
527 spin_unlock_bh(&sh_chan->desc_lock);
528
529 return NULL;
530 }
531
532 static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
533 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
534 size_t len, unsigned long flags)
535 {
536 struct sh_dmae_chan *sh_chan;
537 struct scatterlist sg;
538
539 if (!chan || !len)
540 return NULL;
541
542 chan->private = NULL;
543
544 sh_chan = to_sh_chan(chan);
545
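	/* Wrap the source buffer in a one-entry scatterlist so the common SG path can be used */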
546 sg_init_table(&sg, 1);
547 sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
548 offset_in_page(dma_src));
549 sg_dma_address(&sg) = dma_src;
550 sg_dma_len(&sg) = len;
551
552 return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
553 flags);
554 }
555
556 static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
557 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
558 enum dma_data_direction direction, unsigned long flags)
559 {
560 struct sh_dmae_slave *param;
561 struct sh_dmae_chan *sh_chan;
562
563 if (!chan)
564 return NULL;
565
566 sh_chan = to_sh_chan(chan);
567 param = chan->private;
568
569 /* Someone calling slave DMA on a public channel? */
570 if (!param || !sg_len) {
571 dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
572 __func__, param, sg_len, param ? param->slave_id : -1);
573 return NULL;
574 }
575
576 /*
577 * if (param != NULL), this is a successfully requested slave channel,
578 * therefore param->config != NULL too.
579 */
580 return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
581 direction, flags);
582 }
583
584 static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
585 unsigned long arg)
586 {
587 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
588
589 /* Only supports DMA_TERMINATE_ALL */
590 if (cmd != DMA_TERMINATE_ALL)
591 return -ENXIO;
592
593 if (!chan)
594 return -EINVAL;
595
596 dmae_halt(sh_chan);
597
598 spin_lock_bh(&sh_chan->desc_lock);
599 if (!list_empty(&sh_chan->ld_queue)) {
600 /* Record partial transfer */
601 struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
602 struct sh_desc, node);
603 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
604 sh_chan->xmit_shift;
605
606 }
607 spin_unlock_bh(&sh_chan->desc_lock);
608
609 sh_dmae_chan_ld_cleanup(sh_chan, true);
610
611 return 0;
612 }
613
614 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
615 {
616 struct sh_desc *desc, *_desc;
617 /* Is the "exposed" head of a chain acked? */
618 bool head_acked = false;
619 dma_cookie_t cookie = 0;
620 dma_async_tx_callback callback = NULL;
621 void *param = NULL;
622
623 spin_lock_bh(&sh_chan->desc_lock);
624 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
625 struct dma_async_tx_descriptor *tx = &desc->async_tx;
626
627 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
628 BUG_ON(desc->mark != DESC_SUBMITTED &&
629 desc->mark != DESC_COMPLETED &&
630 desc->mark != DESC_WAITING);
631
632 /*
633 * queue is ordered, and we use this loop to (1) clean up all
634 * completed descriptors, and to (2) update descriptor flags of
635 * any chunks in a (partially) completed chain
636 */
637 if (!all && desc->mark == DESC_SUBMITTED &&
638 desc->cookie != cookie)
639 break;
640
641 if (tx->cookie > 0)
642 cookie = tx->cookie;
643
644 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
645 if (sh_chan->completed_cookie != desc->cookie - 1)
646 dev_dbg(sh_chan->dev,
647 "Completing cookie %d, expected %d\n",
648 desc->cookie,
649 sh_chan->completed_cookie + 1);
650 sh_chan->completed_cookie = desc->cookie;
651 }
652
653 /* Call callback on the last chunk */
654 if (desc->mark == DESC_COMPLETED && tx->callback) {
655 desc->mark = DESC_WAITING;
656 callback = tx->callback;
657 param = tx->callback_param;
658 dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
659 tx->cookie, tx, sh_chan->id);
660 BUG_ON(desc->chunks != 1);
661 break;
662 }
663
664 if (tx->cookie > 0 || tx->cookie == -EBUSY) {
665 if (desc->mark == DESC_COMPLETED) {
666 BUG_ON(tx->cookie < 0);
667 desc->mark = DESC_WAITING;
668 }
669 head_acked = async_tx_test_ack(tx);
670 } else {
671 switch (desc->mark) {
672 case DESC_COMPLETED:
673 desc->mark = DESC_WAITING;
674 /* Fall through */
675 case DESC_WAITING:
676 if (head_acked)
677 async_tx_ack(&desc->async_tx);
678 }
679 }
680
681 dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
682 tx, tx->cookie);
683
684 if (((desc->mark == DESC_COMPLETED ||
685 desc->mark == DESC_WAITING) &&
686 async_tx_test_ack(&desc->async_tx)) || all) {
687 /* Remove from ld_queue list */
688 desc->mark = DESC_IDLE;
689 list_move(&desc->node, &sh_chan->ld_free);
690 }
691 }
692 spin_unlock_bh(&sh_chan->desc_lock);
693
694 if (callback)
695 callback(param);
696
697 return callback;
698 }
699
700 /*
701 * sh_chan_ld_cleanup - Clean up link descriptors
702 *
703 * This function cleans up the ld_queue of DMA channel.
704 */
705 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
706 {
707 while (__ld_cleanup(sh_chan, all))
708 ;
709 }
710
711 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
712 {
713 struct sh_desc *desc;
714
715 spin_lock_bh(&sh_chan->desc_lock);
716 /* If a transfer is already in progress, do nothing */
717 if (dmae_is_busy(sh_chan)) {
718 spin_unlock_bh(&sh_chan->desc_lock);
719 return;
720 }
721
722 /* Find the first descriptor that has not been transferred yet */
723 list_for_each_entry(desc, &sh_chan->ld_queue, node)
724 if (desc->mark == DESC_SUBMITTED) {
725 dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
726 desc->async_tx.cookie, sh_chan->id,
727 desc->hw.tcr, desc->hw.sar, desc->hw.dar);
728 /* Get the ld start address from ld_queue */
729 dmae_set_reg(sh_chan, &desc->hw);
730 dmae_start(sh_chan);
731 break;
732 }
733
734 spin_unlock_bh(&sh_chan->desc_lock);
735 }
736
737 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
738 {
739 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
740 sh_chan_xfer_ld_queue(sh_chan);
741 }
742
743 static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
744 dma_cookie_t cookie,
745 struct dma_tx_state *txstate)
746 {
747 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
748 dma_cookie_t last_used;
749 dma_cookie_t last_complete;
750 enum dma_status status;
751
752 sh_dmae_chan_ld_cleanup(sh_chan, false);
753
754 last_used = chan->cookie;
755 last_complete = sh_chan->completed_cookie;
756 BUG_ON(last_complete < 0);
757 dma_set_tx_state(txstate, last_complete, last_used, 0);
758
759 spin_lock_bh(&sh_chan->desc_lock);
760
761 status = dma_async_is_complete(cookie, last_complete, last_used);
762
763 /*
764 * If we don't find the cookie on the queue, the transfer has been aborted
765 * and we have to report an error
766 */
767 if (status != DMA_SUCCESS) {
768 struct sh_desc *desc;
769 status = DMA_ERROR;
770 list_for_each_entry(desc, &sh_chan->ld_queue, node)
771 if (desc->cookie == cookie) {
772 status = DMA_IN_PROGRESS;
773 break;
774 }
775 }
776
777 spin_unlock_bh(&sh_chan->desc_lock);
778
779 return status;
780 }
781
782 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
783 {
784 irqreturn_t ret = IRQ_NONE;
785 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
786 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
787
788 if (chcr & CHCR_TE) {
789 /* DMA stop */
790 dmae_halt(sh_chan);
791
792 ret = IRQ_HANDLED;
793 tasklet_schedule(&sh_chan->tasklet);
794 }
795
796 return ret;
797 }
798
799 #if defined(CONFIG_CPU_SH4)
800 static irqreturn_t sh_dmae_err(int irq, void *data)
801 {
802 struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
803 int i;
804
805 /* halt the dma controller */
806 sh_dmae_ctl_stop(shdev);
807
808 /* We cannot detect which channel caused the error, so we have to reset them all */
809 for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
810 struct sh_dmae_chan *sh_chan = shdev->chan[i];
811 if (sh_chan) {
812 struct sh_desc *desc;
813 /* Stop the channel */
814 dmae_halt(sh_chan);
815 /* Complete all */
816 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
817 struct dma_async_tx_descriptor *tx = &desc->async_tx;
818 desc->mark = DESC_IDLE;
819 if (tx->callback)
820 tx->callback(tx->callback_param);
821 }
822 list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
823 }
824 }
825 sh_dmae_rst(shdev);
826
827 return IRQ_HANDLED;
828 }
829 #endif
830
831 static void dmae_do_tasklet(unsigned long data)
832 {
833 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
834 struct sh_desc *desc;
835 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
836 u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
837
838 spin_lock(&sh_chan->desc_lock);
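	/* The descriptor whose end address matches the current SAR/DAR is the one that just completed */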
839 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
840 if (desc->mark == DESC_SUBMITTED &&
841 ((desc->direction == DMA_FROM_DEVICE &&
842 (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
843 (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
844 dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
845 desc->async_tx.cookie, &desc->async_tx,
846 desc->hw.dar);
847 desc->mark = DESC_COMPLETED;
848 break;
849 }
850 }
851 spin_unlock(&sh_chan->desc_lock);
852
853 /* Next desc */
854 sh_chan_xfer_ld_queue(sh_chan);
855 sh_dmae_chan_ld_cleanup(sh_chan, false);
856 }
857
858 static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
859 int irq, unsigned long flags)
860 {
861 int err;
862 struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
863 struct platform_device *pdev = to_platform_device(shdev->common.dev);
864 struct sh_dmae_chan *new_sh_chan;
865
866 /* alloc channel */
867 new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
868 if (!new_sh_chan) {
869 dev_err(shdev->common.dev,
870 "No free memory for allocating dma channels!\n");
871 return -ENOMEM;
872 }
873
874 /* reference the common struct dma_device */
875 new_sh_chan->common.device = &shdev->common;
876
877 new_sh_chan->dev = shdev->common.dev;
878 new_sh_chan->id = id;
879 new_sh_chan->irq = irq;
880 new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
881
882 /* Init DMA tasklet */
883 tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
884 (unsigned long)new_sh_chan);
885
886 /* Init the channel */
887 dmae_init(new_sh_chan);
888
889 spin_lock_init(&new_sh_chan->desc_lock);
890
891 /* Init descriptor management lists */
892 INIT_LIST_HEAD(&new_sh_chan->ld_queue);
893 INIT_LIST_HEAD(&new_sh_chan->ld_free);
894
895 /* Add the channel to DMA device channel list */
896 list_add_tail(&new_sh_chan->common.device_node,
897 &shdev->common.channels);
898 shdev->common.chancnt++;
899
900 if (pdev->id >= 0)
901 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
902 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
903 else
904 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
905 "sh-dma%d", new_sh_chan->id);
906
907 /* set up channel irq */
908 err = request_irq(irq, &sh_dmae_interrupt, flags,
909 new_sh_chan->dev_id, new_sh_chan);
910 if (err) {
911 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
912 "with return %d\n", id, err);
913 goto err_no_irq;
914 }
915
916 shdev->chan[id] = new_sh_chan;
917 return 0;
918
919 err_no_irq:
920 /* remove from dmaengine device node */
921 list_del(&new_sh_chan->common.device_node);
922 kfree(new_sh_chan);
923 return err;
924 }
925
926 static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
927 {
928 int i;
929
930 for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
931 if (shdev->chan[i]) {
932 struct sh_dmae_chan *sh_chan = shdev->chan[i];
933
934 free_irq(sh_chan->irq, sh_chan);
935
936 list_del(&sh_chan->common.device_node);
937 kfree(sh_chan);
938 shdev->chan[i] = NULL;
939 }
940 }
941 shdev->common.chancnt = 0;
942 }
943
944 static int __init sh_dmae_probe(struct platform_device *pdev)
945 {
946 struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
947 unsigned long irqflags = IRQF_DISABLED,
948 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
949 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
950 int err, i, irq_cnt = 0, irqres = 0;
951 struct sh_dmae_device *shdev;
952 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
953
954 /* get platform data */
955 if (!pdata || !pdata->channel_num)
956 return -ENODEV;
957
958 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
959 /* The DMARS area is optional; if absent, this controller cannot do slave DMA */
960 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
961 /*
962 * IRQ resources:
963 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
964 * the error IRQ, in which case it is the only IRQ in this resource:
965 * start == end. If it is the only IRQ resource, all channels also
966 * use the same IRQ.
967 * 2. DMA channel IRQ resources can be specified one per resource or in
968 * ranges (start != end)
969 * 3. iff all events (channels and, optionally, error) on this
970 * controller use the same IRQ, only one IRQ resource can be
971 * specified, otherwise there must be one IRQ per channel, even if
972 * some of them are equal
973 * 4. if all IRQs on this controller are equal or if some specific IRQs
974 * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
975 * requested with the IRQF_SHARED flag
976 */
977 errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
978 if (!chan || !errirq_res)
979 return -ENODEV;
980
981 if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
982 dev_err(&pdev->dev, "DMAC register region already claimed\n");
983 return -EBUSY;
984 }
985
986 if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
987 dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
988 err = -EBUSY;
989 goto ermrdmars;
990 }
991
992 err = -ENOMEM;
993 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
994 if (!shdev) {
995 dev_err(&pdev->dev, "Not enough memory\n");
996 goto ealloc;
997 }
998
999 shdev->chan_reg = ioremap(chan->start, resource_size(chan));
1000 if (!shdev->chan_reg)
1001 goto emapchan;
1002 if (dmars) {
1003 shdev->dmars = ioremap(dmars->start, resource_size(dmars));
1004 if (!shdev->dmars)
1005 goto emapdmars;
1006 }
1007
1008 /* platform data */
1009 shdev->pdata = pdata;
1010
1011 pm_runtime_enable(&pdev->dev);
1012 pm_runtime_get_sync(&pdev->dev);
1013
1014 /* reset dma controller */
1015 err = sh_dmae_rst(shdev);
1016 if (err)
1017 goto rst_err;
1018
1019 INIT_LIST_HEAD(&shdev->common.channels);
1020
1021 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
1022 if (dmars)
1023 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
1024
1025 shdev->common.device_alloc_chan_resources
1026 = sh_dmae_alloc_chan_resources;
1027 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
1028 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
1029 shdev->common.device_tx_status = sh_dmae_tx_status;
1030 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
1031
1032 /* Compulsory fields for DMA_SLAVE */
1033 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
1034 shdev->common.device_control = sh_dmae_control;
1035
1036 shdev->common.dev = &pdev->dev;
1037 /* Default transfer size of 4 bytes requires 4-byte alignment */
1038 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
1039
1040 #if defined(CONFIG_CPU_SH4)
1041 chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1042
1043 if (!chanirq_res)
1044 chanirq_res = errirq_res;
1045 else
1046 irqres++;
1047
1048 if (chanirq_res == errirq_res ||
1049 (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
1050 irqflags = IRQF_SHARED;
1051
1052 errirq = errirq_res->start;
1053
1054 err = request_irq(errirq, sh_dmae_err, irqflags,
1055 "DMAC Address Error", shdev);
1056 if (err) {
1057 dev_err(&pdev->dev,
1058 "DMA failed requesting irq #%d, error %d\n",
1059 errirq, err);
1060 goto eirq_err;
1061 }
1062
1063 #else
1064 chanirq_res = errirq_res;
1065 #endif /* CONFIG_CPU_SH4 */
1066
1067 if (chanirq_res->start == chanirq_res->end &&
1068 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
1069 /* Special case - all multiplexed */
1070 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
1071 chan_irq[irq_cnt] = chanirq_res->start;
1072 chan_flag[irq_cnt] = IRQF_SHARED;
1073 }
1074 } else {
1075 do {
1076 for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
1077 if ((errirq_res->flags & IORESOURCE_BITS) ==
1078 IORESOURCE_IRQ_SHAREABLE)
1079 chan_flag[irq_cnt] = IRQF_SHARED;
1080 else
1081 chan_flag[irq_cnt] = IRQF_DISABLED;
1082 dev_dbg(&pdev->dev,
1083 "Found IRQ %d for channel %d\n",
1084 i, irq_cnt);
1085 chan_irq[irq_cnt++] = i;
1086 }
1087 chanirq_res = platform_get_resource(pdev,
1088 IORESOURCE_IRQ, ++irqres);
1089 } while (irq_cnt < pdata->channel_num && chanirq_res);
1090 }
1091
1092 if (irq_cnt < pdata->channel_num)
1093 goto eirqres;
1094
1095 /* Create the DMA channels */
1096 for (i = 0; i < pdata->channel_num; i++) {
1097 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
1098 if (err)
1099 goto chan_probe_err;
1100 }
1101
1102 pm_runtime_put(&pdev->dev);
1103
1104 platform_set_drvdata(pdev, shdev);
1105 dma_async_device_register(&shdev->common);
1106
1107 return err;
1108
1109 chan_probe_err:
1110 sh_dmae_chan_remove(shdev);
1111 eirqres:
1112 #if defined(CONFIG_CPU_SH4)
1113 free_irq(errirq, shdev);
1114 eirq_err:
1115 #endif
1116 rst_err:
1117 pm_runtime_put(&pdev->dev);
1118 if (dmars)
1119 iounmap(shdev->dmars);
1120 emapdmars:
1121 iounmap(shdev->chan_reg);
1122 emapchan:
1123 kfree(shdev);
1124 ealloc:
1125 if (dmars)
1126 release_mem_region(dmars->start, resource_size(dmars));
1127 ermrdmars:
1128 release_mem_region(chan->start, resource_size(chan));
1129
1130 return err;
1131 }
1132
1133 static int __exit sh_dmae_remove(struct platform_device *pdev)
1134 {
1135 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1136 struct resource *res;
1137 int errirq = platform_get_irq(pdev, 0);
1138
1139 dma_async_device_unregister(&shdev->common);
1140
1141 if (errirq > 0)
1142 free_irq(errirq, shdev);
1143
1144 /* remove channel data */
1145 sh_dmae_chan_remove(shdev);
1146
1147 pm_runtime_disable(&pdev->dev);
1148
1149 if (shdev->dmars)
1150 iounmap(shdev->dmars);
1151 iounmap(shdev->chan_reg);
1152
1153 kfree(shdev);
1154
1155 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1156 if (res)
1157 release_mem_region(res->start, resource_size(res));
1158 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1159 if (res)
1160 release_mem_region(res->start, resource_size(res));
1161
1162 return 0;
1163 }
1164
1165 static void sh_dmae_shutdown(struct platform_device *pdev)
1166 {
1167 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1168 sh_dmae_ctl_stop(shdev);
1169 }
1170
1171 static struct platform_driver sh_dmae_driver = {
1172 .remove = __exit_p(sh_dmae_remove),
1173 .shutdown = sh_dmae_shutdown,
1174 .driver = {
1175 .name = "sh-dma-engine",
1176 },
1177 };
1178
1179 static int __init sh_dmae_init(void)
1180 {
1181 return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
1182 }
1183 module_init(sh_dmae_init);
1184
1185 static void __exit sh_dmae_exit(void)
1186 {
1187 platform_driver_unregister(&sh_dmae_driver);
1188 }
1189 module_exit(sh_dmae_exit);
1190
1191 MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1192 MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1193 MODULE_LICENSE("GPL");