// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))

#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff	/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))

struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc	vd;		/* Virtual descriptor */
	int			nb_desc;	/* Number of hw. descriptors */
	size_t			len;		/* Number of bytes xfered */
	dma_addr_t		first;		/* First descriptor's addr */
	/* At least one descriptor has a src/dst address not a multiple of 8 */
	bool			misaligned;
	bool			cyclic;
	struct dma_pool		*desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw	*hw_desc[];	/* DMA coherent descriptors */
};

struct pxad_phy {
	int			idx;
	void __iomem		*base;
	struct pxad_chan	*vchan;
};

struct pxad_chan {
	struct virt_dma_chan	vc;		/* Virtual channel */
	u32			drcmr;		/* Requestor of the channel */
	enum pxad_chan_prio	prio;		/* Required priority of phy */
	/*
	 * At least one desc_sw in submitted or issued transfers on this channel
	 * has one address such as: addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool			misaligned;
	struct dma_slave_config	cfg;		/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy		*phy;
	struct dma_pool		*desc_pool;	/* Descriptors pool */
	dma_cookie_t		bus_error;

	wait_queue_head_t	wq_state;
};

struct pxad_device {
	struct dma_device		slave;
	int				nr_chans;
	int				nr_requestors;
	void __iomem			*base;
	struct pxad_phy			*phys;
	spinlock_t			phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry			*dbgfs_root;
	struct dentry			**dbgfs_chan;
#endif
};

#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)					\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			 _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)

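/*
 * Requestor lines 0..63 have their DRCMR mapping register in a first bank
 * at 0x100 + line * 4; the remaining lines sit in a second bank starting
 * at 0x1000.
 */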
static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}

static bool pxad_filter_fn(struct dma_chan *chan, void *param);

/*
 * Debug fs
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int requester_chan_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}

static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")

static int descriptors_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}

static int chan_state_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
		   str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
		   _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
		   "yes" : "no");
	seq_printf(s, "\tDCSR  = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}

static int state_show(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(chan_state);
DEFINE_SHOW_ATTRIBUTE(descriptors);
DEFINE_SHOW_ATTRIBUTE(requester_chan);

static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					  int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	debugfs_create_file("state", 0400, chan, dt, &chan_state_fops);
	debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops);
	debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops);

	return chan;
}

static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_chan =
		kmalloc_array(pdev->nr_chans, sizeof(struct dentry *),
			      GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		return;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);

	debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops);

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);

	for (i = 0; i < pdev->nr_chans; i++)
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif

static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}

static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	if (chan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(chan->drcmr);
		writel_relaxed(0, chan->phy->base + reg);
	}

	spin_lock_irqsave(&pdev->phy_lock, flags);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}

static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	struct pxad_device *pdev;
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
	if (phy->vchan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(phy->vchan->drcmr);
		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
	}

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

static void pxad_launch_chan(struct pxad_chan *chan,
			     struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}
	chan->bus_error = 0;

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
	wake_up(&chan->wq_state);
}

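/*
 * Every transfer is terminated by an extra "updater" descriptor: it makes
 * the channel copy one u32 (its own ddadr field, i.e. DDADR_STOP) over its
 * own dtadr field. is_desc_completed() can therefore tell a finished
 * transfer (dtadr was clobbered) from a pending one (dtadr still equals
 * dsadr + 8). For cyclic transfers, the last real descriptor loops back to
 * the first one instead of chaining into the updater.
 */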
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
	if (sw_desc->cyclic)
		sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}

static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}

static void pxad_desc_chain(struct virt_dma_desc *vd1,
			    struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}

static bool pxad_try_hotchain(struct virt_dma_chan *vc,
			      struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd))
			return true;
	}

	return false;
}

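/*
 * Returns the channel's DCSR with PXA_DCSR_RUN still set when the pending
 * bit in DINT is not for this channel, so that the caller can bail out
 * with IRQ_NONE.
 */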
static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear irq */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}

static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	bool vd_completed;
	dma_cookie_t last_started = 0;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock(&chan->vc.lock);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		vd_completed = is_desc_completed(vd);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
			__func__, vd, vd->tx.cookie, vd_completed,
			dcsr);
		last_started = vd->tx.cookie;
		if (to_pxad_sw_desc(vd)->cyclic) {
			vchan_cyclic_callback(vd);
			break;
		}
		if (vd_completed) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_BUSERR) {
		chan->bus_error = last_started;
		phy_disable(phy);
	}

	if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): channel stopped, submitted_empty=%d issued_empty=%d\n",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock(&chan->vc.lock);
	wake_up(&chan->wq_state);

	return IRQ_HANDLED;
}

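/*
 * Single-irq variant: DINT holds one pending bit per physical channel;
 * dispatch every set bit to the per-channel handler.
 */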
static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}

static void pxad_free_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	vchan_free_chan_resources(&chan->vc);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;

	chan->drcmr = U32_MAX;
	chan->prio = PXAD_PRIO_LOWEST;
}

static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	BUG_ON(sw_desc->nb_desc == 0);
	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}

static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(sizeof(*sw_desc) +
			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}

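/*
 * Submission first tries to hot chain the descriptor onto a still-running
 * channel; otherwise it is cold chained to the last submitted descriptor,
 * unless that would introduce a new misalignment, in which case the channel
 * is left to stop and gets relaunched from the irq handler.
 */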
static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}

static void pxad_issue_pending(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct virt_dma_desc *vd_first;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (list_empty(&chan->vc.desc_submitted))
		goto out;

	vd_first = list_first_entry(&chan->vc.desc_submitted,
				    struct virt_dma_desc, node);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]\n", __func__, vd_first, vd_first->tx.cookie);

	vchan_issue_pending(&chan->vc);
	if (!pxad_try_hotchain(&chan->vc, vd_first))
		pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
	     unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	INIT_LIST_HEAD(&vd->node);
	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}

static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	*dcmd = 0;
	if (dir == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWSRC;
	}
	if (dir == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWTRG;
	}
	if (dir == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;

	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (chan->cfg.slave_id)
		chan->drcmr = chan->cfg.slave_id;
}

static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	for_each_sg(sgl, sg, sg_len, i)
		nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	if (!dchan)
		return -EINVAL;

	chan->cfg = *cfg;
	return 0;
}

static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)\n", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		pxad_free_phy(chan);
		chan->phy = NULL;
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}

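/**
 * pxad_residue - Returns the number of bytes left to transfer
 * @chan: dma channel
 * @cookie: transaction identifier
 *
 * Walks the issued hardware descriptors, locates the one the phy is
 * currently working on from DSADR/DTADR, and sums up what remains after it.
 */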
static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	/*
	 * curr has to be actually read before checking descriptor
	 * completion, so that a curr inside a status updater
	 * descriptor implies the following test returns true, and
	 * preventing reordering of curr load and the test.
	 */
	rmb();
	if (is_desc_completed(vd))
		goto out;

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor
		 * which lies inside the boundaries of the curr
		 * pointer. All descriptors that occur in the list
		 * _after_ we found that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	if (cookie == chan->bus_error)
		return DMA_ERROR;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}

static void pxad_synchronize(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	wait_event(chan->wq_state, !is_chan_running(chan));
	vchan_synchronize(&chan->vc);
}

static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
	return 0;
}

static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret = 0;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}

static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);

static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}

static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans,
			    unsigned int nr_requestors)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	pdev->nr_requestors = nr_requestors;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_synchronize = pxad_synchronize;
	pdev->slave.device_terminate_all = pxad_terminate_all;

	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;

		c->drcmr = U32_MAX;
		c->prio = PXAD_PRIO_LOWEST;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
		init_waitqueue_head(&c->wq_state);
	}

	return dmaenginem_async_device_register(&pdev->slave);
}

static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	const struct dma_slave_map *slave_map = NULL;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
					   &nb_requestors);
		if (ret) {
			dev_warn(&op->dev,
				 "#dma-requests set to default 32 as missing in OF: %d",
				 ret);
			nb_requestors = 32;
		}
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
		nb_requestors = pdata->nb_requestors;
		slave_map = pdata->slave_map;
		slave_map_cnt = pdata->slave_map_cnt;
	} else {
		dma_channels = 32;	/* default to 32 channels */
	}

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
	pdev->slave.filter.map = slave_map;
	pdev->slave.filter.mapcnt = slave_map_cnt;
	pdev->slave.filter.fn = pxad_filter_fn;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdev->slave.descriptor_reuse = true;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
		 dma_channels, nb_requestors);
	return 0;
}

static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver = {
		.name	= "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table	= pxad_id_table,
	.probe		= pxad_probe,
	.remove		= pxad_remove,
};

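/*
 * Legacy filter function for dma_request_slave_channel_compat() and the
 * slave map: binds the requestor line and priority carried by a
 * struct pxad_param to the candidate channel.
 */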
static bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");