/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */
#define DWC_DEFAULT_CTLLO(_chan) ({					\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig;	\
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;				\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;				\
									\
		(DWC_CTLL_DST_MSIZE(_dmsize)				\
		 | DWC_CTLL_SRC_MSIZE(_smsize)				\
		 | DWC_CTLL_LLP_D_EN					\
		 | DWC_CTLL_LLP_S_EN					\
		 | DWC_CTLL_DMS(_dwc->dst_master)			\
		 | DWC_CTLL_SMS(_dwc->src_master));			\
	})
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
	} else {
		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/
static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}
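/*
 * For example, with src = 0x1004, dst = 0x2008 and len = 0x100 the OR of
 * all three is 0x310c; its two lowest bits are clear but bit 2 is set,
 * so dwc_fast_fls() returns 2 and the transfer is limited to 32-bit
 * (1 << 2 byte) accesses.
 */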
static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/
/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, desc->lli.sar);
	channel_writel(dwc, DAR, desc->lli.dar);
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		dwc->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback	callback = NULL;
	void			*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc		*child;
	unsigned long		flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	dma_descriptor_unmap(txd);
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}
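/*
 * For example, if CTL_HI.BLOCK_TS reports 16 completed source beats and
 * CTL_LO.SRC_TR_WIDTH (bits 6:4) is 2, i.e. 32-bit beats, the source has
 * already provided 16 * (1 << 2) = 64 bytes.
 */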
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update desc to reflect last sent one */
				if (active != head->next)
					desc = to_dw_desc(active->prev);

				dwc->residue -= desc->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		dwc->residue = 0;

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		dwc->residue = 0;
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		dwc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			dwc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		dwc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (child->lli.llp == llp) {
				/* Currently in progress */
				dwc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			dwc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */

dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (status_xfer & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan),
			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
			status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* Make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status = dma_readl(dw, STATUS_INT);

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
	list_add_tail(&desc->desc_node, &dwc->queue);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
			   dw->data_width[dwc->dst_master]);

	src_width = dst_width = min_t(unsigned int, data_width,
				      dwc_fast_fls(src | dest | len));

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;
		desc->len = xfer_count << src_width;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		data_width = dw->data_width[dwc->src_master];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		data_width = dw->data_width[dwc->dst_master];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (!dws || dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */

	dwc->src_id = dws->src_id;
	dwc->dst_id = dws->dst_id;

	dwc->src_master = dws->src_master;
	dwc->dst_master = dws->dst_master;

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
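/*
 * A typical client pairs dw_dma_filter() with dma_request_channel(); the
 * sketch below is illustrative only.  The dw_dma_slave values are
 * placeholders: dma_dev must point at the DMAC's struct device, and
 * src_id/dst_id are the peripheral's hardware request lines.
 *
 *	struct dw_dma_slave slave = {
 *		.dma_dev	= dmac_dev,
 *		.src_id		= 0,
 *		.dst_id		= 1,
 *		.src_master	= 1,
 *		.dst_master	= 0,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &slave);
 */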
/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding the most significant bit set: fls(n) - 2.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
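/*
 * For example, a requested maxburst of 16 becomes fls(16) - 2 = 5 - 2 = 3,
 * the controller encoding for a 16-item burst, while a maxburst of 1 maps
 * to the encoded value 0.
 */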
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	unsigned long		flags;
	unsigned int		count = 20;	/* timeout iterations */
	u32			cfglo;

	spin_lock_irqsave(&dwc->lock, flags);

	cfglo = channel_readl(dwc, CFG_LO);
	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	dwc->paused = true;

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	dwc->paused = false;
}
static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	if (!dwc->paused)
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_resume(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}
static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
{
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	residue = dwc->residue;
	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
		residue -= dwc_get_sent(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return residue;
}
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE)
		dma_set_residue(txstate, dwc_get_residue(dwc));

	if (dwc->paused && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/
static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

static void dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/* Enable controller here if needed */
	if (!dw->in_use)
		dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		dma_addr_t phys;

		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
		if (!desc)
			goto err_desc_alloc;

		memset(desc, 0, sizeof(struct dw_desc));

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;

		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;

err_desc_alloc:
	dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);

	return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was a last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		dw_dma_off(dw);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* Assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* Setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (unlikely(!is_slave_direction(direction)))
		goto out_err;

	dwc->direction = direction;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last)
			last->lli.llp = desc->txd.phys;

		last = desc;
	}

	/* Let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;

	dev_dbg(chan2dev(&dwc->chan),
			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
			&buf_addr, buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
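/*
 * A minimal usage sketch of the cyclic extensions (illustrative only;
 * buffer addresses, sizes and error handling are placeholders):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (!IS_ERR(cdesc))
 *		dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */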
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
/*----------------------------------------------------------------------*/

int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
{
	struct dw_dma		*dw;
	bool			autocfg;
	unsigned int		dw_params;
	unsigned int		nr_channels;
	unsigned int		max_blk_size = 0;
	int			err;
	int			i;

	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->regs = chip->regs;
	chip->dw = dw;

	pm_runtime_enable(chip->dev);
	pm_runtime_get_sync(chip->dev);

	dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
	autocfg = dw_params >> DW_PARAMS_EN & 0x1;

	dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

	if (!pdata && autocfg) {
		pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata) {
			err = -ENOMEM;
			goto err_pdata;
		}

		/* Fill platform data with the default values */
		pdata->is_private = true;
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	}

	if (autocfg)
		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
	else
		nr_channels = pdata->nr_channels;

	dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Get hardware configuration parameters */
	if (autocfg) {
		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);

		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < dw->nr_masters; i++) {
			dw->data_width[i] =
				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
		}
	} else {
		dw->nr_masters = pdata->nr_masters;
		memcpy(dw->data_width, pdata->data_width, 4);
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << nr_channels) - 1;

	/* Force dma off, just in case */
	dw_dma_off(dw);

	/* Disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  "dw_dmac", dw);
	if (err)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];
		int			r = nr_channels - i - 1;

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = r;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int dwc_params;
			void __iomem *addr = chip->regs + r * sizeof(u32);

			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					   dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
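			/*
			 * For example, a stored value of 0x0a decodes to
			 * (4 << 10) - 1 = 4095 data items, and 0x00 decodes
			 * to (4 << 0) - 1 = 3.
			 */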
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;

			/* Check if channel supports multi block transfer */
			channel_writel(dwc, LLP, 0xfffffffc);
			dwc->nollp =
				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
			channel_writel(dwc, LLP, 0);
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return err;
}
EXPORT_SYMBOL_GPL(dw_dma_probe);
int dw_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_chan *dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	pm_runtime_disable(chip->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);
int dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_off(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_disable);

int dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_on(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_enable);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");