/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "../dmaengine.h"
#include "shdma.h"

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER	256
#define SH_DMA_TCR_MAX		(16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);
/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit,
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}
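
/*
 * Illustration of the variants above (values hypothetical): with a
 * per-channel CHCLR register (chclr_bitwise == 0) this writes 0 to that
 * channel's own register; with one shared CHCLR (chclr_bitwise == 1 and,
 * say, chclr_bit == 3) it writes 1 << 3, touching only the bit that resets
 * this particular channel.
 */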
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}
static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}
/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}
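
/*
 * Worked example with hypothetical pdata values: if the transfer-size index
 * occupies CHCR bits 4:3 (ts_low_mask = 0x00000018, ts_low_shift = 3) and
 * bits 21:20 (ts_high_mask = 0x00300000, ts_high_shift = 18), then
 * chcr = 0x00100008 yields cnt = 0x1 | 0x4 = 5, and ts_shift[5] is the
 * log2 of the transfer unit in bytes for that encoding.
 */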
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}
static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}
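
/*
 * With the definitions above, the default CHCR programmed here selects
 * destination/source address increment (DM_INC | SM_INC), auto-request
 * (0x400) and 2^LOG2_DEFAULT_XFER_SIZE = 4-byte transfer units, so
 * xmit_shift evaluates to 2 for the default MEMCPY setup.
 */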
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
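
/*
 * DMARS registers pack the MID/RID values for two channels into one 16-bit
 * word: dmars_bit is 0 for the channel in the low byte and 8 for the one in
 * the high byte, and the (0xff00 >> shift) mask above preserves the partner
 * channel's byte while val is merged in. (Byte layout as implied by the
 * masking logic; consult the SoC manual for the authoritative map.)
 */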
static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}
static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}
static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}
/*
 * Find a slave channel configuration from the controller list by either a
 * slave ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = cfg->slave_id;
				return cfg;
			}
	}

	return NULL;
}
static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);

	if (!cfg)
		return -ENXIO;

	if (!try)
		sh_chan->config = cfg;

	return 0;
}
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}
static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}
static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	/*
	 * hw.tcr is in bytes, while the TCR register counts transfer units,
	 * so the register value must be scaled back to bytes before
	 * subtracting, not the other way around.
	 */
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}
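
/*
 * Example of the residue math (illustrative numbers): for a 4096-byte
 * descriptor with 4-byte transfer units (xmit_shift = 2), a TCR readback of
 * 256 units means 256 << 2 = 1024 bytes are still pending, so the function
 * reports 4096 - 1024 = 3072 bytes already transferred.
 */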
/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}
static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};
static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}
static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

	sh_dmae_ctl_stop(shdev);
}
static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif
const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};
static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->config->addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}
static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};
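
/*
 * These callbacks plug the controller into the shared shdma-base library.
 * A slave consumer would typically reach them through the generic dmaengine
 * API, e.g. (sketch only, error handling omitted, slave_id as defined by
 * this platform's data):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(uintptr_t)slave_id);
 *
 * which ends up in sh_dmae_set_slave() via the shdma-base glue.
 */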
static int sh_dmae_probe(struct platform_device *pdev)
{
	const struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
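	/*
	 * Hypothetical example of the rules above: a DMAC with an error IRQ
	 * in IRQ resource 0 (start == end) and six channel IRQs supplied as
	 * a single range in IRQ resource 1 (start = 34, end = 39) gets one
	 * IRQ per channel, while a DMAC with only IRQ resource 0 has every
	 * channel share that one line, requested with IRQF_SHARED. The
	 * numbers are illustrative, not from a real board file.
	 */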
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}
	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 2^2 = 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;
	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
					IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}
	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);
	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}
static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(dma_dev);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();

	return 0;
}
static const struct of_device_id sh_dmae_of_match[] = {
	{ .compatible = "renesas,shdma", },
	{}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
	.shutdown	= sh_dmae_shutdown,
};
static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);

	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);
MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);