/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */
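
/*
 * Default value for the low half of the channel control register: single
 * transfers on both ends (MSIZE 0), linked-list pointer updates enabled
 * for source and destination, and the AHB masters taken from the slave
 * configuration (destination master 0 and source master 1 when no slave
 * data is supplied).
 */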
#define DWC_DEFAULT_CTLLO(private) ({				\
		struct dw_dma_slave *__slave = (private);	\
		int dms = __slave ? __slave->dst_master : 0;	\
		int sms = __slave ? __slave->src_master : 1;	\
								\
		(DWC_CTLL_DST_MSIZE(0)				\
		 | DWC_CTLL_SRC_MSIZE(0)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(dms)				\
		 | DWC_CTLL_SMS(sms));				\
	})

/*
 * Maximum number of transfers per block.
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
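
/*
 * Grab a free, already-ACKed descriptor from the channel's free list.
 * Returns NULL if none is available; the prep routines handle that case.
 */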
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dwc->lock);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dwc->lock);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
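
/*
 * Hand ownership of a descriptor (and any children on its tx_list) back
 * to the CPU so the driver may look at the LLI contents again.
 */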
static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc	*child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_bh(&dwc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_bh(&dwc->lock);
	}
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/
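
/*
 * Mark a descriptor as complete: record its cookie as the last completed
 * one, hand the descriptor and its children back to the free list, unmap
 * the buffers for non-slave (memcpy) transfers and invoke the client's
 * completion callback, if any.
 */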
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	dwc->completed = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	dwc_sync_desc_for_cpu(dwc, desc);

	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);

		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}
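
/*
 * Called when the controller signals that the whole active chain has
 * finished: restart the channel with anything still queued, then complete
 * every descriptor that was on the active list.
 */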
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}
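
/*
 * Scan the active list and complete every descriptor the controller has
 * already moved past, using the current LLP value to spot the one that is
 * still in flight.
 */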
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;

	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list))
		return;

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		if (desc->lli.llp == llp)
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp)
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		dwc_descriptor_complete(dwc, desc);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;

	dwc_scan_descriptors(dw, dwc);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;
		if (callback) {
			spin_unlock(&dwc->lock);
			callback(callback_param);
			spin_lock(&dwc->lock);
		}
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		spin_lock(&dwc->lock);
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
		spin_unlock(&dwc->lock);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/
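
/*
 * dmaengine tx_submit hook: assign a cookie to the descriptor and either
 * start it immediately (channel idle) or append it to the software queue.
 */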
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&dwc->lock);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_bh(&dwc->lock);

	return cookie;
}
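
/*
 * Build a linked list of hardware blocks for a memory-to-memory copy,
 * splitting the request into DWC_MAX_COUNT-sized chunks and picking the
 * widest transfer size the source, destination and length allow.
 */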
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan->private)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
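
/*
 * Build an LLI chain for a slave (peripheral) transfer from a scatterlist.
 * The peripheral side always uses the register width from the slave
 * configuration; the memory side falls back to byte-wide accesses when a
 * segment is not word-aligned.
 */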
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC_M2P);
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len;
			u32		mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			desc->lli.ctlhi = len >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC_P2M);
		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len;
			u32		mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			desc->lli.ctlhi = len >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	LIST_HEAD(list);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);

	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_bh(&dwc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);

	return 0;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int			ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		spin_lock_bh(&dwc->lock);
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
		spin_unlock_bh(&dwc->lock);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
	spin_unlock_bh(&dwc->lock);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_dma_slave	*dws;
	int			i;
	u32			cfghi;
	u32			cfglo;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	cfghi = DWC_CFGH_FIFO_MODE;
	cfglo = 0;

	dws = chan->private;
	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_bh(&dwc->lock);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&dwc->lock);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dwc->lock);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_bh(&dwc->lock);
		i = ++dwc->descs_allocated;
	}

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_bh(&dwc->lock);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock(&dwc->lock);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock(&dwc->lock);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock(&dwc->lock);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);

	spin_lock(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock(&dwc->lock);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_data_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	struct dw_dma_slave		*dws = chan->private;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_bh(&dwc->lock);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_bh(&dwc->lock);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);
	reg_width = dws->reg_width;
	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_TO_DEVICE:
			desc->lli.dar = dws->tx_reg;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_FC_M2P
					| DWC_CTLL_INT_EN);
			break;
		case DMA_FROM_DEVICE:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = dws->rx_reg;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_FC_P2M
					| DWC_CTLL_INT_EN);
			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
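
/*
 * Illustrative usage only (not part of this driver): a client that has
 * already attached a struct dw_dma_slave through chan->private could
 * drive the cyclic extensions roughly as sketched below. The names
 * "my_period_done" and "my_dev" are hypothetical.
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
 *				   DMA_TO_DEVICE);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_dev;
 *
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */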

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}
static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->completed = 1;
		dwc->chan.chan_id = i;
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = 7 - i;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), dw->dma.chancnt);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;
	struct resource		*io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
};

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");