// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"
#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))
#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */
#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */
#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff		/* length mask (max = 8K - 1) */
#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))
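/*
 * PXA_DCMD_LENGTH caps a single hardware descriptor at 0x1fff (8191)
 * bytes; masking off the low (1 << PDMA_ALIGNMENT) - 1 bits rounds the
 * per-descriptor maximum down to 8184 bytes, so splitting a transfer
 * across descriptors never creates a boundary that breaks the 8-byte
 * alignment unit.
 */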
struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc	vd;		/* Virtual descriptor */
	int			nb_desc;	/* Number of hw. descriptors */
	size_t			len;		/* Number of bytes xfered */
	dma_addr_t		first;		/* First descriptor's addr */

	/* At least one descriptor has an src/dst address not multiple of 8 */
	bool			misaligned;
	bool			cyclic;
	struct dma_pool		*desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw	*hw_desc[];	/* DMA coherent descriptors */
};
struct pxad_phy {
	int			idx;
	void __iomem		*base;
	struct pxad_chan	*vchan;
};

struct pxad_chan {
	struct virt_dma_chan	vc;		/* Virtual channel */
	u32			drcmr;		/* Requestor of the channel */
	enum pxad_chan_prio	prio;		/* Required priority of phy */
	/*
	 * At least one desc_sw in submitted or issued transfers on this channel
	 * has one address such as: addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool			misaligned;
	struct dma_slave_config	cfg;		/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy		*phy;
	struct dma_pool		*desc_pool;	/* Descriptors pool */
	dma_cookie_t		bus_error;

	wait_queue_head_t	wq_state;
};

struct pxad_device {
	struct dma_device		slave;
	int				nr_chans;
	int				nr_requestors;
	void __iomem			*base;
	struct pxad_phy			*phys;
	spinlock_t			phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry			*dbgfs_root;
	struct dentry			*dbgfs_state;
	struct dentry			**dbgfs_chan;
#endif
};
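/*
 * Locking model: a channel's descriptor lists and its phy pointer are
 * protected by vc.lock, while the phy <-> virtual channel association
 * (pxad_phy.vchan) is protected by pxad_device.phy_lock. A phy is only
 * grabbed when a transfer is launched, and is given back once the
 * channel stops or is terminated.
 */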
#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)					\
	container_of((_vd), struct pxad_desc_sw, vd)
#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			 _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
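/*
 * Each requestor line has a 4-byte DRCMR mapping register: lines below
 * 64 start at offset 0x100, higher lines at offset 0x1000.
 */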
static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}

static bool pxad_filter_fn(struct dma_chan *chan, void *param);
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
static int requester_chan_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}
static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}
#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")
static int descriptors_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}
static int chan_state_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width = 0;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
		   str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
		   _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
		   "yes" : "no");
	seq_printf(s, "\tDCSR  = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}
static int state_show(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(chan_state);
DEFINE_SHOW_ATTRIBUTE(descriptors);
DEFINE_SHOW_ATTRIBUTE(requester_chan);
static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					  int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
	struct dentry *chan_reqs = NULL;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	if (chan)
		chan_state = debugfs_create_file("state", 0400, chan, dt,
						 &chan_state_fops);
	if (chan_state)
		chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
						 &descriptors_fops);
	if (chan_descr)
		chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
						&requester_chan_fops);
	if (!chan_reqs)
		goto err_state;

	return chan;

err_state:
	debugfs_remove_recursive(chan);
	return NULL;
}
static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
	if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
		goto err_root;

	pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
						pdev, &state_fops);
	if (!pdev->dbgfs_state)
		goto err_state;

	pdev->dbgfs_chan =
		kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state),
			      GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		goto err_alloc;

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
	if (!chandir)
		goto err_chandir;

	for (i = 0; i < pdev->nr_chans; i++) {
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
		if (!pdev->dbgfs_chan[i])
			goto err_chans;
	}

	return;
err_chans:
err_chandir:
	kfree(pdev->dbgfs_chan);
err_alloc:
err_state:
	debugfs_remove_recursive(pdev->dbgfs_root);
err_root:
	pr_err("pxad: debugfs is not available\n");
}
static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif
static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}
static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	if (chan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(chan->drcmr);
		writel_relaxed(0, chan->phy->base + reg);
	}

	spin_lock_irqsave(&pdev->phy_lock, flags);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}
static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}
static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}
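/*
 * Enabling a phy is a three-step write sequence: map the channel's
 * requestor line to this phy in DRCMR (flow-controlled transfers only),
 * set or clear the phy's DALGN bit according to the transfer alignment,
 * then set the RUN bit together with the wanted interrupt enables in
 * DCSR.
 */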
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	struct pxad_device *pdev;
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
	if (phy->vchan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(phy->vchan->drcmr);
		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
	}

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}
static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}
static void pxad_launch_chan(struct pxad_chan *chan,
			     struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}
	chan->bus_error = 0;

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
	wake_up(&chan->wq_state);
}
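/*
 * Completion detection uses a trailing "updater" hardware descriptor
 * appended to every chain. Its job is to copy one u32 from its own
 * ddadr field (which holds DDADR_STOP) over its own dtadr field,
 * 8 bytes further on. Until it has run, dtadr reads back as dsadr + 8;
 * once the phy has executed it the value differs, which is exactly
 * what is_desc_completed() tests.
 */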
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
	if (sw_desc->cyclic)
		sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}
static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}
static void pxad_desc_chain(struct virt_dma_desc *vd1,
			    struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}
static bool pxad_try_hotchain(struct virt_dma_chan *vc,
			      struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd))
			return true;
	}

	return false;
}
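/*
 * All phys share a single interrupt status register (DINT). A handler
 * first checks its channel's bit there, then acknowledges its own
 * interrupt by writing the read DCSR value back.
 */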
static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear irq */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, &phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}
static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	bool vd_completed;
	dma_cookie_t last_started = 0;
	unsigned long flags;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock_irqsave(&chan->vc.lock, flags);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		vd_completed = is_desc_completed(vd);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
			__func__, vd, vd->tx.cookie, vd_completed,
			dcsr);
		last_started = vd->tx.cookie;
		if (to_pxad_sw_desc(vd)->cyclic) {
			vchan_cyclic_callback(vd);
			break;
		}
		if (vd_completed) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_BUSERR) {
		chan->bus_error = last_started;
		phy_disable(phy);
	}

	if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	wake_up(&chan->wq_state);

	return IRQ_HANDLED;
}
static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}
static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}
static void pxad_free_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	vchan_free_chan_resources(&chan->vc);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;

	chan->drcmr = U32_MAX;
	chan->prio = PXAD_PRIO_LOWEST;
}
static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	BUG_ON(sw_desc->nb_desc == 0);
	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}
static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(sizeof(*sw_desc) +
			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}
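/*
 * Submission tries three strategies in decreasing order of preference:
 * hot chaining onto a transfer the phy is still executing, cold
 * chaining behind an earlier descriptor waiting in the submitted list,
 * or queueing the descriptor unlinked so that issue_pending or the irq
 * handler launches it later.
 */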
static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}
static void pxad_issue_pending(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct virt_dma_desc *vd_first;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (list_empty(&chan->vc.desc_submitted))
		goto out;

	vd_first = list_first_entry(&chan->vc.desc_submitted,
				    struct virt_dma_desc, node);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);

	vchan_issue_pending(&chan->vc);
	if (!pxad_try_hotchain(&chan->vc, vd_first))
		pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
	     unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	INIT_LIST_HEAD(&vd->node);
	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}
static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	*dcmd = 0;
	if (dir == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWSRC;
	}
	if (dir == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWTRG;
	}
	if (dir == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;

	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (chan->cfg.slave_id)
		chan->drcmr = chan->cfg.slave_id;
}
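/*
 * Each prep helper below sizes its chain as the number of data
 * descriptors plus one (pxad_alloc_desc(chan, nb_desc + 1)): the extra
 * slot holds the completion "updater" that set_updater_desc() fills in.
 */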
static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}
static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	for_each_sg(sgl, sg, sg_len, i)
		nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}
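/*
 * Cyclic transfers set PXA_DCMD_ENDIRQEN on every period so that the
 * interrupt fires once per period, and set_updater_desc() links the
 * chain's tail back to its head so it loops until terminate_all().
 */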
static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}
static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	if (!dchan)
		return -EINVAL;

	chan->cfg = *cfg;
	return 0;
}
static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		pxad_free_phy(chan);
		chan->phy = NULL;
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}
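/*
 * Residue accounting: read the phy's running pointer (DSADR when the
 * source side is the incrementing one, DTADR otherwise), find the
 * hardware descriptor this pointer falls into, and count the remaining
 * bytes of that descriptor plus all descriptors after it.
 */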
static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	/*
	 * curr has to be actually read before checking descriptor
	 * completion, so that a curr inside a status updater
	 * descriptor implies the following test returns true, and
	 * preventing reordering of curr load and the test.
	 */
	rmb();
	if (is_desc_completed(vd))
		goto out;

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor
		 * which lies inside the boundaries of the curr
		 * pointer. All descriptors that occur in the list
		 * _after_ we found that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}
static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	if (cookie == chan->bus_error)
		return DMA_ERROR;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}
static void pxad_synchronize(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	wait_event(chan->wq_state, !is_chan_running(chan));
	vchan_synchronize(&chan->vc);
}
static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}
static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
	return 0;
}
static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret = 0;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}
static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);
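/*
 * Device-tree clients reference the controller with two cells, the
 * requestor line and the channel priority, which pxad_dma_xlate()
 * copies into the allocated channel. An illustrative consumer binding
 * (not part of this file) could look like:
 *
 *	dmas = <&pdma 75 3>;
 *	dma-names = "rx";
 */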
static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}
static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans,
			    unsigned int nr_requestors)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	pdev->nr_requestors = nr_requestors;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_synchronize = pxad_synchronize;
	pdev->slave.device_terminate_all = pxad_terminate_all;

	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;

		c->drcmr = U32_MAX;
		c->prio = PXAD_PRIO_LOWEST;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
		init_waitqueue_head(&c->wq_state);
	}

	return dmaenginem_async_device_register(&pdev->slave);
}
static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	const struct dma_slave_map *slave_map = NULL;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
					   &nb_requestors);
		if (ret) {
			dev_warn(pdev->slave.dev,
				 "#dma-requests set to default 32 as missing in OF: %d",
				 ret);
			nb_requestors = 32;
		}
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
		nb_requestors = pdata->nb_requestors;
		slave_map = pdata->slave_map;
		slave_map_cnt = pdata->slave_map_cnt;
	} else {
		dma_channels = 32;	/* default 32 channel */
	}

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
	pdev->slave.filter.map = slave_map;
	pdev->slave.filter.mapcnt = slave_map_cnt;
	pdev->slave.filter.fn = pxad_filter_fn;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdev->slave.descriptor_reuse = true;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
		 dma_channels, nb_requestors);
	return 0;
}
static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver		= {
		.name	= "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table	= pxad_id_table,
	.probe		= pxad_probe,
	.remove		= pxad_remove,
};
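/*
 * Non device-tree clients pass a struct pxad_param (see
 * linux/dma/pxa-dma.h) carrying the wanted requestor and priority
 * through the dmaengine filter API; the filter below also rejects any
 * channel that does not belong to this driver.
 */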
static bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");