/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000

#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c
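
/*
 * The channel count and the remaining register offsets used below were
 * dropped from this extract.  The values here are a restoration based on
 * the SA-1100 DMA register map (six channels; DDAR at 0x00; the
 * DBSA/DBTA/DBSB/DBTB buffer address/count registers following the DCSR
 * set/clear/read triplet; each channel spanning 0x20 bytes of register
 * space) - treat them as assumed rather than copied from the original.
 */
#define NR_PHY_CHAN	6
#define DMA_ALIGN	3

#define DMA_DDAR	0x00
#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c

#define DMA_SIZE	0x20
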
#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)

#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)
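
/*
 * Note (added): in the DDAR encoding above, bits 0-3 configure the
 * transfer (direction, endianness, burst size, port width) while bits
 * 4-7 select which serial device the channel services.  Each device
 * appears as a Tr/Rc pair; the CD() channel table further down is
 * generated from exactly these names.
 */
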
struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

struct sa11x0_dma_desc {
	struct virt_dma_desc	vd;

	u32			ddar;
	size_t			size;
	unsigned		period;
	bool			cyclic;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[0];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};

struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* Protected by c->vc.lock */
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
};

struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}

static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}

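/*
 * A note on the buffer selection in sa11x0_dma_start_sg() above: the
 * SA11x0 engine double-buffers transfers through buffers A and B, with
 * DCSR_BIU reporting which buffer the hardware is currently using.
 * Buffer A is (re)loaded either when B is in use with B's transfer
 * already started, or when A is neither in use nor started; otherwise
 * buffer B is loaded.  This keeps the two buffers ping-ponging so the
 * engine is never left idle between scatterlist entries.
 */
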
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}

static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}

static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
				      (DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}

static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}

static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

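/*
 * sa11x0_dma_pos() picks which buffer address register reflects the
 * live transfer position: DBSA when buffer A is the buffer started but
 * not yet handed over (BIU clear), or when the engine sits on buffer B
 * without a started transfer; DBSB otherwise.  The buffer address
 * registers advance as the engine consumes the buffer, so the returned
 * address can be matched against the descriptor's sg list, which is
 * what the residue calculation in tx_status below relies on.
 */
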
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
				&c->vc, &addr);
			return NULL;
		}
	}

	txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;

			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}

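/*
 * Worked example of the splitting above (values assumed purely for
 * illustration): a 0x2400-byte segment exceeds DMA_MAX_SIZE (0x1fff),
 * so mult = DIV_ROUND_UP(0x2400, 0x1ffc) = 2 and the chunk length
 * becomes (0x2400 / 2) & ~3 = 0x1200.  The segment is thus emitted as
 * two equal 0x1200-byte sg entries rather than a 0x1ffc chunk followed
 * by a tiny 0x404 remainder - matching the one extra entry counted by
 * the DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1 estimate in the
 * first pass.
 */
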
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

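/*
 * Example of the sizing above (numbers assumed for illustration): for
 * size = 16 KiB and period = 4 KiB, sgperiod = DIV_ROUND_UP(0x1000,
 * 0x1ffc) = 1 and sglen = 0x4000 * 1 / 0x1000 = 4, i.e. one sg entry
 * per period and four periods in the ring.  txd->period counts sg
 * entries per period, which is why the completion path tests
 * (p->sg_done % txd->period) before raising the cyclic callback.
 */
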
static int sa11x0_dma_device_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
		&c->vc, &addr, width, maxburst);

	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}

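/*
 * Example of the DDAR address encoding above (the device address is
 * assumed purely for illustration): for a FIFO at 0x80000b00, bits
 * 31-28 are kept in place (0x80000000) and bits 21-2 are shifted up by
 * six, so (0x00000b00 & 0x003ffffc) << 6 = 0x0002c000; the final DDAR
 * is that OR'd with the direction/device/width/burst bits computed
 * above.
 */
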
static int sa11x0_dma_device_pause(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int sa11x0_dma_device_resume(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);

	p = c->phy;
	if (p) {
		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
		/* vchan is assigned to a pchan - stop the channel */
		writel(DCSR_RUN | DCSR_IE |
		       DCSR_STRTA | DCSR_DONEA |
		       DCSR_STRTB | DCSR_DONEB,
		       p->base + DMA_DCSR_C);

		if (p->txd_load) {
			if (p->txd_load != p->txd_done)
				list_add_tail(&p->txd_load->vd.node, &head);
			p->txd_load = NULL;
		}
		if (p->txd_done) {
			list_add_tail(&p->txd_done->vd.node, &head);
			p->txd_done = NULL;
		}

		c->phy = NULL;
		spin_lock(&d->lock);
		p->vchan = NULL;
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};

static const struct dma_slave_map sa11x0_dma_map[] = {
	{ "sa11x0-ir", "tx", "Ser2ICPTr" },
	{ "sa11x0-ir", "rx", "Ser2ICPRc" },
	{ "sa11x0-ssp", "tx", "Ser4SSPTr" },
	{ "sa11x0-ssp", "rx", "Ser4SSPRc" },
};

static bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	const char *p = param;

	return !strcmp(c->name, p);
}

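/*
 * A sketch of how a client driver would obtain one of these channels
 * through the slave map above - e.g. the "sa11x0-ir" device asking for
 * its "rx" channel, which resolves to the "Ser2ICPRc" vchan via
 * sa11x0_dma_filter_fn().  The helper name is hypothetical and not
 * taken from any in-tree user; callers must check the returned pointer
 * with IS_ERR() as usual for dma_request_chan().
 */
static inline struct dma_chan *sa11x0_ir_get_rx_chan(struct device *dev)
{
	/* Looks up "rx" for this device in sa11x0_dma_map */
	return dma_request_chan(dev, "rx");
}
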
static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_config = sa11x0_dma_device_config;
	dmadev->device_pause = sa11x0_dma_device_pause;
	dmadev->device_resume = sa11x0_dma_device_resume;
	dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		INIT_LIST_HEAD(&c->node);

		c->vc.desc_free = sa11x0_dma_free_desc;
		vchan_init(&c->vc, dmadev);
	}

	return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq > 0)
		free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	d->slave.filter.fn = sa11x0_dma_filter_fn;
	d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
	d->slave.filter.map = sa11x0_dma_map;

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}

static int sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}

static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}

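/*
 * Note on the save order above: when DCSR_BIU says buffer B is in use,
 * the B registers are saved into slot 0 and A into slot 1, and the
 * STRTA/STRTB bits are swapped in the saved DCSR.  Resume below then
 * reprograms the hardware in A-first order from slots 0 and 1, so the
 * transfer restarts on buffer A regardless of which buffer was active
 * when the channel was suspended.
 */
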
static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq	= sa11x0_dma_suspend,
	.resume_noirq	= sa11x0_dma_resume,
	.freeze_noirq	= sa11x0_dma_suspend,
	.thaw_noirq	= sa11x0_dma_resume,
	.poweroff_noirq	= sa11x0_dma_suspend,
	.restore_noirq	= sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= sa11x0_dma_remove,
};

static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");