/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"
/*
 * Glossary
 * --------
 *
 * at_hdmac         : Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma  : ATmel DMA controller entity related
 * atc_ / atchan    : ATmel DMA Channel entity related
 */
#define ATC_DEFAULT_CFG     (ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLA   (0)
#define ATC_DEFAULT_CTRLB   (ATC_SIF(AT_DMA_MEM_IF) \
                            |ATC_DIF(AT_DMA_MEM_IF))
/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");
/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
/*----------------------------------------------------------------------*/
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
        return list_first_entry(&atchan->active_list,
                                struct at_desc, desc_node);
}
static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
        return list_first_entry(&atchan->queue,
                                struct at_desc, desc_node);
}
/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
                                            gfp_t gfp_flags)
{
        struct at_desc  *desc = NULL;
        struct at_dma   *atdma = to_at_dma(chan->device);
        dma_addr_t phys;

        desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
        if (desc) {
                memset(desc, 0, sizeof(struct at_desc));
                INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                /* txd.flags will be overwritten in prep functions */
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.tx_submit = atc_tx_submit;
                desc->txd.phys = phys;
        }

        return desc;
}
/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        struct at_desc *ret = NULL;
        unsigned long flags;
        unsigned int i = 0;

        spin_lock_irqsave(&atchan->lock, flags);
        list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&atchan->chan_common),
                                "desc %p not ACKed\n", desc);
        }
        spin_unlock_irqrestore(&atchan->lock, flags);
        dev_vdbg(chan2dev(&atchan->chan_common),
                "scanned %u descriptors on freelist\n", i);

        /* no more descriptor available in initial pool: create one more */
        if (!ret) {
                ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
                if (ret) {
                        spin_lock_irqsave(&atchan->lock, flags);
                        atchan->descs_allocated++;
                        spin_unlock_irqrestore(&atchan->lock, flags);
                } else {
                        dev_err(chan2dev(&atchan->chan_common),
                                        "not enough descriptors available\n");
                }
        }

        return ret;
}
/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
        if (desc) {
                struct at_desc *child;
                unsigned long flags;

                spin_lock_irqsave(&atchan->lock, flags);
                list_for_each_entry(child, &desc->tx_list, desc_node)
                        dev_vdbg(chan2dev(&atchan->chan_common),
                                        "moving child desc %p to freelist\n",
                                        child);
                list_splice_init(&desc->tx_list, &atchan->free_list);
                dev_vdbg(chan2dev(&atchan->chan_common),
                         "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &atchan->free_list);
                spin_unlock_irqrestore(&atchan->lock, flags);
        }
}
/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
                           struct at_desc *desc)
{
        if (!(*first)) {
                *first = desc;
        } else {
                /* inform the HW lli about chaining */
                (*prev)->lli.dscr = desc->txd.phys;
                /* insert the link descriptor to the LD ring */
                list_add_tail(&desc->desc_node,
                                &(*first)->tx_list);
        }
        *prev = desc;
}
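/*
 * Illustrative sketch (hypothetical caller, mirroring the prep_*
 * routines below): descriptors are fetched one by one and chained so
 * that each hardware LLI's dscr field points at the physical address
 * of its successor:
 *
 *      struct at_desc *first = NULL, *prev = NULL, *desc;
 *
 *      while (more_segments) {                 // hypothetical condition
 *              desc = atc_desc_get(atchan);
 *              if (!desc)
 *                      goto err_desc_get;
 *              fill_lli(desc);                 // hypothetical helper
 *              atc_desc_chain(&first, &prev, desc);
 *      }
 *      set_desc_eol(prev);                     // terminate the HW list
 */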
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
        struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);

        /* ASSERT:  channel is idle */
        if (atc_chan_is_enabled(atchan)) {
                dev_err(chan2dev(&atchan->chan_common),
                        "BUG: Attempted to start non-idle channel\n");
                dev_err(chan2dev(&atchan->chan_common),
                        "  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
                        channel_readl(atchan, SADDR),
                        channel_readl(atchan, DADDR),
                        channel_readl(atchan, CTRLA),
                        channel_readl(atchan, CTRLB),
                        channel_readl(atchan, DSCR));

                /* The tasklet will hopefully advance the queue... */
                return;
        }

        vdbg_dump_regs(atchan);

        /* clear any pending interrupt */
        while (dma_readl(atdma, EBCISR))
                cpu_relax();

        channel_writel(atchan, SADDR, 0);
        channel_writel(atchan, DADDR, 0);
        channel_writel(atchan, CTRLA, 0);
        channel_writel(atchan, CTRLB, 0);
        channel_writel(atchan, DSCR, first->txd.phys);
        dma_writel(atdma, CHER, atchan->mask);

        vdbg_dump_regs(atchan);
}
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
        struct dma_async_tx_descriptor  *txd = &desc->txd;

        dev_vdbg(chan2dev(&atchan->chan_common),
                "descriptor %u complete\n", txd->cookie);

        dma_cookie_complete(txd);

        /* move children to free_list */
        list_splice_init(&desc->tx_list, &atchan->free_list);
        /* move myself to free_list */
        list_move(&desc->desc_node, &atchan->free_list);

        /* unmap dma addresses (not on slave channels) */
        if (!atchan->chan_common.private) {
                struct device *parent = chan2parent(&atchan->chan_common);
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                        if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
                                dma_unmap_single(parent,
                                                desc->lli.daddr,
                                                desc->len, DMA_FROM_DEVICE);
                        else
                                dma_unmap_page(parent,
                                                desc->lli.daddr,
                                                desc->len, DMA_FROM_DEVICE);
                }
                if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                        if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
                                dma_unmap_single(parent,
                                                desc->lli.saddr,
                                                desc->len, DMA_TO_DEVICE);
                        else
                                dma_unmap_page(parent,
                                                desc->lli.saddr,
                                                desc->len, DMA_TO_DEVICE);
                }
        }

        /* for cyclic transfers,
         * no need to replay callback function while stopping */
        if (!atc_chan_is_cyclic(atchan)) {
                dma_async_tx_callback   callback = txd->callback;
                void                    *param = txd->callback_param;

                /*
                 * The API requires that no submissions are done from a
                 * callback, so we don't need to drop the lock here
                 */
                if (callback)
                        callback(param);
        }

        dma_run_dependencies(txd);
}
/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        LIST_HEAD(list);

        dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

        BUG_ON(atc_chan_is_enabled(atchan));

        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
         */
        if (!list_empty(&atchan->queue))
                atc_dostart(atchan, atc_first_queued(atchan));
        /* empty active_list now it is completed */
        list_splice_init(&atchan->active_list, &list);
        /* empty queue list by moving descriptors (if any) to active_list */
        list_splice_init(&atchan->queue, &atchan->active_list);

        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                atc_chain_complete(atchan, desc);
}
/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
        struct at_desc  *desc, *_desc;
        struct at_desc  *child;

        dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

        list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
                if (!(desc->lli.ctrla & ATC_DONE))
                        /* This one is currently in progress */
                        return;

                list_for_each_entry(child, &desc->tx_list, desc_node)
                        if (!(child->lli.ctrla & ATC_DONE))
                                /* Currently in progress */
                                return;

                /*
                 * No descriptors so far seem to be in progress, i.e.
                 * this chain must be done.
                 */
                atc_chain_complete(atchan, desc);
        }
}
/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
        dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

        if (list_empty(&atchan->active_list) ||
            list_is_singular(&atchan->active_list)) {
                atc_complete_all(atchan);
        } else {
                atc_chain_complete(atchan, atc_first_active(atchan));
                /* advance work */
                atc_dostart(atchan, atc_first_active(atchan));
        }
}
/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
        struct at_desc *bad_desc;
        struct at_desc *child;

        /*
         * The descriptor currently at the head of the active list is
         * broken. Since we don't have any way to report errors, we'll
         * just have to scream loudly and try to carry on.
         */
        bad_desc = atc_first_active(atchan);
        list_del_init(&bad_desc->desc_node);

        /* As we are stopped, take advantage to push queued descriptors
         * in active_list */
        list_splice_init(&atchan->queue, atchan->active_list.prev);

        /* Try to restart the controller */
        if (!list_empty(&atchan->active_list))
                atc_dostart(atchan, atc_first_active(atchan));

        /*
         * KERN_CRITICAL may seem harsh, but since this only happens
         * when someone submits a bad physical address in a
         * descriptor, we should consider ourselves lucky that the
         * controller flagged an error instead of scribbling over
         * random memory locations.
         */
        dev_crit(chan2dev(&atchan->chan_common),
                        "Bad descriptor submitted for DMA!\n");
        dev_crit(chan2dev(&atchan->chan_common),
                        "  cookie: %d\n", bad_desc->txd.cookie);
        atc_dump_lli(atchan, &bad_desc->lli);
        list_for_each_entry(child, &bad_desc->tx_list, desc_node)
                atc_dump_lli(atchan, &child->lli);

        /* Pretend the descriptor completed successfully */
        atc_chain_complete(atchan, bad_desc);
}
/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
        struct at_desc                  *first = atc_first_active(atchan);
        struct dma_async_tx_descriptor  *txd = &first->txd;
        dma_async_tx_callback           callback = txd->callback;
        void                            *param = txd->callback_param;

        dev_vdbg(chan2dev(&atchan->chan_common),
                        "new cyclic period llp 0x%08x\n",
                        channel_readl(atchan, DSCR));

        if (callback)
                callback(param);
}
/*--  IRQ & Tasklet  ---------------------------------------------------*/
static void atc_tasklet(unsigned long data)
{
        struct at_dma_chan *atchan = (struct at_dma_chan *)data;
        unsigned long flags;

        spin_lock_irqsave(&atchan->lock, flags);
        if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
                atc_handle_error(atchan);
        else if (atc_chan_is_cyclic(atchan))
                atc_handle_cyclic(atchan);
        else
                atc_advance_work(atchan);

        spin_unlock_irqrestore(&atchan->lock, flags);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
        struct at_dma           *atdma = (struct at_dma *)dev_id;
        struct at_dma_chan      *atchan;
        int                     i;
        u32                     status, pending, imr;
        int                     ret = IRQ_NONE;

        do {
                imr = dma_readl(atdma, EBCIMR);
                status = dma_readl(atdma, EBCISR);
                pending = status & imr;

                if (!pending)
                        break;

                dev_vdbg(atdma->dma_common.dev,
                        "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
                         status, imr, pending);

                for (i = 0; i < atdma->dma_common.chancnt; i++) {
                        atchan = &atdma->chan[i];
                        if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
                                if (pending & AT_DMA_ERR(i)) {
                                        /* Disable channel on AHB error */
                                        dma_writel(atdma, CHDR,
                                                AT_DMA_RES(i) | atchan->mask);
                                        /* Give information to tasklet */
                                        set_bit(ATC_IS_ERROR, &atchan->status);
                                }
                                tasklet_schedule(&atchan->tasklet);
                                ret = IRQ_HANDLED;
                        }
                }

        } while (pending);

        return ret;
}
/*--  DMA Engine API  --------------------------------------------------*/
/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @desc: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct at_desc          *desc = txd_to_at_desc(tx);
        struct at_dma_chan      *atchan = to_at_dma_chan(tx->chan);
        dma_cookie_t            cookie;
        unsigned long           flags;

        spin_lock_irqsave(&atchan->lock, flags);
        cookie = dma_cookie_assign(tx);

        if (list_empty(&atchan->active_list)) {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
                                desc->txd.cookie);
                atc_dostart(atchan, desc);
                list_add_tail(&desc->desc_node, &atchan->active_list);
        } else {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
                                desc->txd.cookie);
                list_add_tail(&desc->desc_node, &atchan->queue);
        }

        spin_unlock_irqrestore(&atchan->lock, flags);

        return cookie;
}
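/*
 * Illustrative sketch (hypothetical client code): tx_submit is not
 * called directly; a client reaches it through the generic dmaengine
 * wrapper once a descriptor has been prepared:
 *
 *      struct dma_async_tx_descriptor *txd;    // from a prep_* routine
 *      dma_cookie_t cookie;
 *
 *      txd->callback = my_done;                // hypothetical callback
 *      txd->callback_param = my_ctx;
 *      cookie = dmaengine_submit(txd);         // ends up in atc_tx_submit()
 *      dma_async_issue_pending(chan);
 */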
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation physical destination address
 * @src: operation physical source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_desc          *desc = NULL;
        struct at_desc          *first = NULL;
        struct at_desc          *prev = NULL;
        size_t                  xfer_count;
        size_t                  offset;
        unsigned int            src_width;
        unsigned int            dst_width;
        u32                     ctrla;
        u32                     ctrlb;

        dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
                        dest, src, len, flags);

        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
                return NULL;
        }

        ctrla =   ATC_DEFAULT_CTRLA;
        ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
                | ATC_SRC_ADDR_MODE_INCR
                | ATC_DST_ADDR_MODE_INCR
                | ATC_FC_MEM2MEM;

        /*
         * We can be a lot more clever here, but this should take care
         * of the most common optimization.
         */
        if (!((src | dest | len) & 3)) {
                ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
                src_width = dst_width = 2;
        } else if (!((src | dest | len) & 1)) {
                ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
                src_width = dst_width = 1;
        } else {
                ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
                src_width = dst_width = 0;
        }

        for (offset = 0; offset < len; offset += xfer_count << src_width) {
                xfer_count = min_t(size_t, (len - offset) >> src_width,
                                ATC_BTSIZE_MAX);

                desc = atc_desc_get(atchan);
                if (!desc)
                        goto err_desc_get;

                desc->lli.saddr = src + offset;
                desc->lli.daddr = dest + offset;
                desc->lli.ctrla = ctrla | xfer_count;
                desc->lli.ctrlb = ctrlb;

                desc->txd.cookie = 0;

                atc_desc_chain(&first, &prev, desc);
        }

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->len = len;

        /* set end-of-link to the last link descriptor of list*/
        set_desc_eol(desc);

        first->txd.flags = flags; /* client is in control of this ack */

        return &first->txd;

err_desc_get:
        atc_desc_put(atchan, first);
        return NULL;
}
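/*
 * Illustrative sketch (hypothetical client code) of a memcpy request
 * served by the routine above; "dst_phys" and "src_phys" are assumed
 * to be DMA-mapped bus addresses:
 *
 *      struct dma_async_tx_descriptor *txd;
 *
 *      txd = chan->device->device_prep_dma_memcpy(chan, dst_phys,
 *                                                 src_phys, len,
 *                                                 DMA_CTRL_ACK);
 *      if (!txd)
 *              return -ENOMEM;
 *      dmaengine_submit(txd);
 *      dma_async_issue_pending(chan);
 */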
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma_slave     *atslave = chan->private;
        struct dma_slave_config *sconfig = &atchan->dma_sconfig;
        struct at_desc          *first = NULL;
        struct at_desc          *prev = NULL;
        u32                     ctrla;
        u32                     ctrlb;
        dma_addr_t              reg;
        unsigned int            reg_width;
        unsigned int            mem_width;
        unsigned int            i;
        struct scatterlist      *sg;
        size_t                  total_len = 0;

        dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
                        sg_len,
                        direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
                        flags);

        if (unlikely(!atslave || !sg_len)) {
                dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
                return NULL;
        }

        ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
        ctrlb = ATC_IEN;

        switch (direction) {
        case DMA_MEM_TO_DEV:
                reg_width = convert_buswidth(sconfig->dst_addr_width);
                ctrla |=  ATC_DST_WIDTH(reg_width);
                ctrlb |=  ATC_DST_ADDR_MODE_FIXED
                        | ATC_SRC_ADDR_MODE_INCR
                        | ATC_FC_MEM2PER
                        | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
                reg = sconfig->dst_addr;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct at_desc  *desc;
                        u32             len;
                        u32             mem;

                        desc = atc_desc_get(atchan);
                        if (!desc)
                                goto err_desc_get;

                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
                        mem_width = 2;
                        if (unlikely(mem & 3 || len & 3))
                                mem_width = 0;

                        desc->lli.saddr = mem;
                        desc->lli.daddr = reg;
                        desc->lli.ctrla = ctrla
                                        | ATC_SRC_WIDTH(mem_width)
                                        | len >> mem_width;
                        desc->lli.ctrlb = ctrlb;

                        atc_desc_chain(&first, &prev, desc);
                        total_len += len;
                }
                break;
        case DMA_DEV_TO_MEM:
                reg_width = convert_buswidth(sconfig->src_addr_width);
                ctrla |=  ATC_SRC_WIDTH(reg_width);
                ctrlb |=  ATC_DST_ADDR_MODE_INCR
                        | ATC_SRC_ADDR_MODE_FIXED
                        | ATC_FC_PER2MEM
                        | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

                reg = sconfig->src_addr;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct at_desc  *desc;
                        u32             len;
                        u32             mem;

                        desc = atc_desc_get(atchan);
                        if (!desc)
                                goto err_desc_get;

                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
                        mem_width = 2;
                        if (unlikely(mem & 3 || len & 3))
                                mem_width = 0;

                        desc->lli.saddr = reg;
                        desc->lli.daddr = mem;
                        desc->lli.ctrla = ctrla
                                        | ATC_DST_WIDTH(mem_width)
                                        | len >> mem_width;
                        desc->lli.ctrlb = ctrlb;

                        atc_desc_chain(&first, &prev, desc);
                        total_len += len;
                }
                break;
        default:
                return NULL;
        }

        /* set end-of-link to the last link descriptor of list*/
        set_desc_eol(prev);

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->len = total_len;

        /* first link descriptor of list is responsible of flags */
        first->txd.flags = flags; /* client is in control of this ack */

        return &first->txd;

err_desc_get:
        dev_err(chan2dev(chan), "not enough descriptors available\n");
        atc_desc_put(atchan, first);
        return NULL;
}
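/*
 * Illustrative sketch (hypothetical client code): a peripheral driver
 * typically maps a scatterlist and hands it to the routine above via
 * the generic helper; "nents" is the count returned by dma_map_sg():
 *
 *      struct dma_async_tx_descriptor *txd;
 *
 *      txd = dmaengine_prep_slave_sg(chan, sgl, nents,
 *                                    DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *      if (!txd)
 *              goto err;
 *      dmaengine_submit(txd);
 *      dma_async_issue_pending(chan);
 */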
/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
                size_t period_len, enum dma_transfer_direction direction)
{
        if (period_len > (ATC_BTSIZE_MAX << reg_width))
                goto err_out;
        if (unlikely(period_len & ((1 << reg_width) - 1)))
                goto err_out;
        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
                goto err_out;
        if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
                goto err_out;

        return 0;

err_out:
        return -EINVAL;
}
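/*
 * Worked example of the checks above: with reg_width = 2 (32-bit, i.e.
 * 4-byte register accesses), a period may span at most
 * ATC_BTSIZE_MAX << 2 bytes, and both period_len and buf_addr must be
 * multiples of 1 << 2 = 4, since ((1 << reg_width) - 1) masks exactly
 * the low alignment bits.
 */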
/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
                unsigned int period_index, dma_addr_t buf_addr,
                unsigned int reg_width, size_t period_len,
                enum dma_transfer_direction direction)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma_slave     *atslave = chan->private;
        struct dma_slave_config *sconfig = &atchan->dma_sconfig;
        u32                     ctrla;

        /* prepare common CTRLA value */
        ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
                | ATC_DST_WIDTH(reg_width)
                | ATC_SRC_WIDTH(reg_width)
                | period_len >> reg_width;

        switch (direction) {
        case DMA_MEM_TO_DEV:
                desc->lli.saddr = buf_addr + (period_len * period_index);
                desc->lli.daddr = sconfig->dst_addr;
                desc->lli.ctrla = ctrla;
                desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
                                | ATC_SRC_ADDR_MODE_INCR
                                | ATC_FC_MEM2PER
                                | ATC_SIF(AT_DMA_MEM_IF)
                                | ATC_DIF(AT_DMA_PER_IF);
                break;

        case DMA_DEV_TO_MEM:
                desc->lli.saddr = sconfig->src_addr;
                desc->lli.daddr = buf_addr + (period_len * period_index);
                desc->lli.ctrla = ctrla;
                desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
                                | ATC_SRC_ADDR_MODE_FIXED
                                | ATC_FC_PER2MEM
                                | ATC_SIF(AT_DMA_PER_IF)
                                | ATC_DIF(AT_DMA_MEM_IF);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @context: transfer context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                void *context)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma_slave     *atslave = chan->private;
        struct dma_slave_config *sconfig = &atchan->dma_sconfig;
        struct at_desc          *first = NULL;
        struct at_desc          *prev = NULL;
        unsigned long           was_cyclic;
        unsigned int            reg_width;
        unsigned int            periods = buf_len / period_len;
        unsigned int            i;

        dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
                        direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
                        buf_addr,
                        periods, buf_len, period_len);

        if (unlikely(!atslave || !buf_len || !period_len)) {
                dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
                return NULL;
        }

        was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
        if (was_cyclic) {
                dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
                return NULL;
        }

        if (sconfig->direction == DMA_MEM_TO_DEV)
                reg_width = convert_buswidth(sconfig->dst_addr_width);
        else
                reg_width = convert_buswidth(sconfig->src_addr_width);

        /* Check for too big/unaligned periods and unaligned DMA buffer */
        if (atc_dma_cyclic_check_values(reg_width, buf_addr,
                                        period_len, direction))
                goto err_out;

        /* build cyclic linked list */
        for (i = 0; i < periods; i++) {
                struct at_desc  *desc;

                desc = atc_desc_get(atchan);
                if (!desc)
                        goto err_desc_get;

                if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
                                             reg_width, period_len, direction))
                        goto err_desc_get;

                atc_desc_chain(&first, &prev, desc);
        }

        /* lets make a cyclic list */
        prev->lli.dscr = first->txd.phys;

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->len = buf_len;

        return &first->txd;

err_desc_get:
        dev_err(chan2dev(chan), "not enough descriptors available\n");
        atc_desc_put(atchan, first);
err_out:
        clear_bit(ATC_IS_CYCLIC, &atchan->status);
        return NULL;
}
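/*
 * Illustrative sketch (hypothetical client code, e.g. an audio driver
 * streaming from a DMA-mapped ring buffer "buf_phys" of "buf_len"
 * bytes, split into "period_len" chunks):
 *
 *      struct dma_async_tx_descriptor *txd;
 *
 *      txd = chan->device->device_prep_dma_cyclic(chan, buf_phys,
 *                      buf_len, period_len, DMA_MEM_TO_DEV, NULL);
 *      if (!txd)
 *              goto err;
 *      txd->callback = my_period_elapsed;      // fires once per period
 *      txd->callback_param = my_ctx;
 *      dmaengine_submit(txd);
 */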
static int set_runtime_config(struct dma_chan *chan,
                              struct dma_slave_config *sconfig)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);

        /* Check if chan is configured for slave transfers */
        if (!chan->private)
                return -EINVAL;

        memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

        convert_burst(&atchan->dma_sconfig.src_maxburst);
        convert_burst(&atchan->dma_sconfig.dst_maxburst);

        return 0;
}
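/*
 * Illustrative sketch (hypothetical client code): the configuration
 * consumed above arrives through the standard DMA_SLAVE_CONFIG control
 * operation, e.g.:
 *
 *      struct dma_slave_config cfg = {
 *              .direction      = DMA_MEM_TO_DEV,
 *              .dst_addr       = fifo_phys,    // hypothetical FIFO address
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_maxburst   = 4,
 *      };
 *
 *      dmaengine_slave_config(chan, &cfg);     // reaches set_runtime_config()
 */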
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                       unsigned long arg)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        int                     chan_id = atchan->chan_common.chan_id;
        unsigned long           flags;

        LIST_HEAD(list);

        dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

        if (cmd == DMA_PAUSE) {
                spin_lock_irqsave(&atchan->lock, flags);

                dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
                set_bit(ATC_IS_PAUSED, &atchan->status);

                spin_unlock_irqrestore(&atchan->lock, flags);
        } else if (cmd == DMA_RESUME) {
                if (!atc_chan_is_paused(atchan))
                        return 0;

                spin_lock_irqsave(&atchan->lock, flags);

                dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
                clear_bit(ATC_IS_PAUSED, &atchan->status);

                spin_unlock_irqrestore(&atchan->lock, flags);
        } else if (cmd == DMA_TERMINATE_ALL) {
                struct at_desc  *desc, *_desc;
                /*
                 * This is only called when something went wrong elsewhere, so
                 * we don't really care about the data. Just disable the
                 * channel. We still have to poll the channel enable bit due
                 * to AHB/HSB limitations.
                 */
                spin_lock_irqsave(&atchan->lock, flags);

                /* disabling channel: must also remove suspend state */
                dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

                /* confirm that this channel is disabled */
                while (dma_readl(atdma, CHSR) & atchan->mask)
                        cpu_relax();

                /* active_list entries will end up before queued entries */
                list_splice_init(&atchan->queue, &list);
                list_splice_init(&atchan->active_list, &list);

                /* Flush all pending and queued descriptors */
                list_for_each_entry_safe(desc, _desc, &list, desc_node)
                        atc_chain_complete(atchan, desc);

                clear_bit(ATC_IS_PAUSED, &atchan->status);
                /* if channel dedicated to cyclic operations, free it */
                clear_bit(ATC_IS_CYCLIC, &atchan->status);

                spin_unlock_irqrestore(&atchan->lock, flags);
        } else if (cmd == DMA_SLAVE_CONFIG) {
                return set_runtime_config(chan, (struct dma_slave_config *)arg);
        } else {
                return -ENXIO;
        }

        return 0;
}
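/*
 * Illustrative sketch (hypothetical client code): the commands handled
 * above map onto the generic dmaengine wrappers:
 *
 *      dmaengine_pause(chan);          // DMA_PAUSE: suspend the channel
 *      dmaengine_resume(chan);         // DMA_RESUME: restart it
 *      dmaengine_terminate_all(chan);  // DMA_TERMINATE_ALL: abort + flush
 */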
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        dma_cookie_t            last_used;
        dma_cookie_t            last_complete;
        unsigned long           flags;
        enum dma_status         ret;

        spin_lock_irqsave(&atchan->lock, flags);

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret != DMA_SUCCESS) {
                atc_cleanup_descriptors(atchan);

                ret = dma_cookie_status(chan, cookie, txstate);
        }

        last_complete = chan->completed_cookie;
        last_used = chan->cookie;

        spin_unlock_irqrestore(&atchan->lock, flags);

        if (ret != DMA_SUCCESS)
                dma_set_residue(txstate, atc_first_active(atchan)->len);

        if (atc_chan_is_paused(atchan))
                ret = DMA_PAUSED;

        dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
                 ret, cookie, last_complete ? last_complete : 0,
                 last_used ? last_used : 0);

        return ret;
}
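/*
 * Illustrative sketch (hypothetical client code): completion is
 * usually polled through the generic helper, which lands in
 * atc_tx_status() above:
 *
 *      enum dma_status status;
 *
 *      status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *      if (status == DMA_SUCCESS)
 *              ...                     // transfer finished
 */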
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        unsigned long           flags;

        dev_vdbg(chan2dev(chan), "issue_pending\n");

        /* Not needed for cyclic transfers */
        if (atc_chan_is_cyclic(atchan))
                return;

        spin_lock_irqsave(&atchan->lock, flags);
        if (!atc_chan_is_enabled(atchan)) {
                atc_advance_work(atchan);
        }
        spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        struct at_desc          *desc;
        struct at_dma_slave     *atslave;
        unsigned long           flags;
        int                     i;
        u32                     cfg;
        LIST_HEAD(tmp_list);

        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

        /* ASSERT:  channel is idle */
        if (atc_chan_is_enabled(atchan)) {
                dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
                return -EIO;
        }

        cfg = ATC_DEFAULT_CFG;

        atslave = chan->private;
        if (atslave) {
                /*
                 * We need controller-specific data to set up slave
                 * transfers.
                 */
                BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

                /* if cfg configuration specified take it instead of default */
                if (atslave->cfg)
                        cfg = atslave->cfg;
        }

        /* have we already been set up?
         * reconfigure channel but no need to reallocate descriptors */
        if (!list_empty(&atchan->free_list))
                return atchan->descs_allocated;

        /* Allocate initial pool of descriptors */
        for (i = 0; i < init_nr_desc_per_channel; i++) {
                desc = atc_alloc_descriptor(chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(atdma->dma_common.dev,
                                "Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->desc_node, &tmp_list);
        }

        spin_lock_irqsave(&atchan->lock, flags);
        atchan->descs_allocated = i;
        list_splice(&tmp_list, &atchan->free_list);
        dma_cookie_init(chan);
        spin_unlock_irqrestore(&atchan->lock, flags);

        /* channel parameters */
        channel_writel(atchan, CFG, cfg);

        dev_dbg(chan2dev(chan),
                "alloc_chan_resources: allocated %d descriptors\n",
                atchan->descs_allocated);

        return atchan->descs_allocated;
}
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        struct at_desc          *desc, *_desc;
        LIST_HEAD(list);

        dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
                atchan->descs_allocated);

        /* ASSERT:  channel is idle */
        BUG_ON(!list_empty(&atchan->active_list));
        BUG_ON(!list_empty(&atchan->queue));
        BUG_ON(atc_chan_is_enabled(atchan));

        list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
                list_del(&desc->desc_node);
                /* free link descriptor */
                dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
        }
        list_splice_init(&atchan->free_list, &list);
        atchan->descs_allocated = 0;
        atchan->status = 0;

        dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
/*--  Module Management  -----------------------------------------------*/
/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
        .nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
        .nr_channels = 8,
};
#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
        {
                .compatible = "atmel,at91sam9rl-dma",
                .data = &at91sam9rl_config,
        }, {
                .compatible = "atmel,at91sam9g45-dma",
                .data = &at91sam9g45_config,
        }, {
                /* sentinel */
        }
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif
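/*
 * Illustrative device tree node (sketch; the "reg" and "interrupts"
 * values are SoC-specific and shown here only as an example):
 *
 *      dma@ffffec00 {
 *              compatible = "atmel,at91sam9g45-dma";
 *              reg = <0xffffec00 0x200>;
 *              interrupts = <21>;
 *      };
 */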
static const struct platform_device_id atdma_devtypes[] = {
        {
                .name = "at91sam9rl_dma",
                .driver_data = (unsigned long) &at91sam9rl_config,
        }, {
                .name = "at91sam9g45_dma",
                .driver_data = (unsigned long) &at91sam9g45_config,
        }, {
                /* sentinel */
        }
};
static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
                                                struct platform_device *pdev)
{
        if (pdev->dev.of_node) {
                const struct of_device_id *match;
                match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
                if (match == NULL)
                        return NULL;
                return (struct at_dma_platform_data *)match->data;
        }
        return (struct at_dma_platform_data *)
                        platform_get_device_id(pdev)->driver_data;
}
/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
        dma_writel(atdma, EN, 0);

        /* disable all interrupts */
        dma_writel(atdma, EBCIDR, -1L);

        /* confirm that all channels are disabled */
        while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
                cpu_relax();
}
static int __init at_dma_probe(struct platform_device *pdev)
{
        struct resource         *io;
        struct at_dma           *atdma;
        size_t                  size;
        int                     irq;
        int                     err;
        int                     i;
        struct at_dma_platform_data *plat_dat;

        /* setup platform data for each SoC */
        dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
        dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
        dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

        /* get DMA parameters from controller type */
        plat_dat = at_dma_get_driver_data(pdev);
        if (!plat_dat)
                return -ENODEV;

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!io)
                return -EINVAL;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        size = sizeof(struct at_dma);
        size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
        atdma = kzalloc(size, GFP_KERNEL);
        if (!atdma)
                return -ENOMEM;

        /* discover transaction capabilities */
        atdma->dma_common.cap_mask = plat_dat->cap_mask;
        atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

        size = resource_size(io);
        if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
                err = -EBUSY;
                goto err_kfree;
        }

        atdma->regs = ioremap(io->start, size);
        if (!atdma->regs) {
                err = -ENOMEM;
                goto err_release_r;
        }

        atdma->clk = clk_get(&pdev->dev, "dma_clk");
        if (IS_ERR(atdma->clk)) {
                err = PTR_ERR(atdma->clk);
                goto err_clk;
        }
        clk_enable(atdma->clk);

        /* force dma off, just in case */
        at_dma_off(atdma);

        err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
        if (err)
                goto err_irq;

        platform_set_drvdata(pdev, atdma);

        /* create a pool of consistent memory blocks for hardware descriptors */
        atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
                        &pdev->dev, sizeof(struct at_desc),
                        4 /* word alignment */, 0);
        if (!atdma->dma_desc_pool) {
                dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
                err = -ENOMEM;
                goto err_pool_create;
        }

        /* clear any pending interrupt */
        while (dma_readl(atdma, EBCISR))
                cpu_relax();

        /* initialize channels related values */
        INIT_LIST_HEAD(&atdma->dma_common.channels);
        for (i = 0; i < plat_dat->nr_channels; i++) {
                struct at_dma_chan      *atchan = &atdma->chan[i];

                atchan->chan_common.device = &atdma->dma_common;
                dma_cookie_init(&atchan->chan_common);
                list_add_tail(&atchan->chan_common.device_node,
                                &atdma->dma_common.channels);

                atchan->ch_regs = atdma->regs + ch_regs(i);
                spin_lock_init(&atchan->lock);
                atchan->mask = 1 << i;

                INIT_LIST_HEAD(&atchan->active_list);
                INIT_LIST_HEAD(&atchan->queue);
                INIT_LIST_HEAD(&atchan->free_list);

                tasklet_init(&atchan->tasklet, atc_tasklet,
                                (unsigned long)atchan);
                atc_enable_chan_irq(atdma, i);
        }

        /* set base routines */
        atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
        atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
        atdma->dma_common.device_tx_status = atc_tx_status;
        atdma->dma_common.device_issue_pending = atc_issue_pending;
        atdma->dma_common.dev = &pdev->dev;

        /* set prep routines based on capability */
        if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
                atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

        if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
                atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
                /* controller can do slave DMA: can trigger cyclic transfers */
                dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
                atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
                atdma->dma_common.device_control = atc_control;
        }

        dma_writel(atdma, EN, AT_DMA_ENABLE);

        dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
          dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
          dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
          plat_dat->nr_channels);

        dma_async_device_register(&atdma->dma_common);

        return 0;

err_pool_create:
        platform_set_drvdata(pdev, NULL);
        free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
        clk_disable(atdma->clk);
        clk_put(atdma->clk);
err_clk:
        iounmap(atdma->regs);
        atdma->regs = NULL;
err_release_r:
        release_mem_region(io->start, size);
err_kfree:
        kfree(atdma);
        return err;
}
static int __exit at_dma_remove(struct platform_device *pdev)
{
        struct at_dma           *atdma = platform_get_drvdata(pdev);
        struct dma_chan         *chan, *_chan;
        struct resource         *io;

        at_dma_off(atdma);
        dma_async_device_unregister(&atdma->dma_common);

        dma_pool_destroy(atdma->dma_desc_pool);
        platform_set_drvdata(pdev, NULL);
        free_irq(platform_get_irq(pdev, 0), atdma);

        list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
                        device_node) {
                struct at_dma_chan      *atchan = to_at_dma_chan(chan);

                /* Disable interrupts */
                atc_disable_chan_irq(atdma, chan->chan_id);
                tasklet_disable(&atchan->tasklet);

                tasklet_kill(&atchan->tasklet);
                list_del(&chan->device_node);
        }

        clk_disable(atdma->clk);
        clk_put(atdma->clk);

        iounmap(atdma->regs);
        atdma->regs = NULL;

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(io->start, resource_size(io));

        kfree(atdma);

        return 0;
}
static void at_dma_shutdown(struct platform_device *pdev)
{
        struct at_dma   *atdma = platform_get_drvdata(pdev);

        at_dma_off(platform_get_drvdata(pdev));
        clk_disable(atdma->clk);
}
static int at_dma_prepare(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct at_dma *atdma = platform_get_drvdata(pdev);
        struct dma_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
                        device_node) {
                struct at_dma_chan *atchan = to_at_dma_chan(chan);
                /* wait for transaction completion (except in cyclic case) */
                if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
                        return -EAGAIN;
        }
        return 0;
}
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
        struct dma_chan *chan = &atchan->chan_common;

        /* Channel should be paused by user
         * do it anyway even if it is not done already */
        if (!atc_chan_is_paused(atchan)) {
                dev_warn(chan2dev(chan),
                "cyclic channel not paused, should be done by channel user\n");
                atc_control(chan, DMA_PAUSE, 0);
        }

        /* now preserve additional data for cyclic operations */
        /* next descriptor address in the cyclic list */
        atchan->save_dscr = channel_readl(atchan, DSCR);

        vdbg_dump_regs(atchan);
}
static int at_dma_suspend_noirq(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct at_dma *atdma = platform_get_drvdata(pdev);
        struct dma_chan *chan, *_chan;

        /* preserve data */
        list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
                        device_node) {
                struct at_dma_chan *atchan = to_at_dma_chan(chan);

                if (atc_chan_is_cyclic(atchan))
                        atc_suspend_cyclic(atchan);
                atchan->save_cfg = channel_readl(atchan, CFG);
        }
        atdma->save_imr = dma_readl(atdma, EBCIMR);

        /* disable DMA controller */
        at_dma_off(atdma);
        clk_disable(atdma->clk);
        return 0;
}
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
        struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);

        /* restore channel status for cyclic descriptors list:
         * next descriptor in the cyclic list at the time of suspend */
        channel_writel(atchan, SADDR, 0);
        channel_writel(atchan, DADDR, 0);
        channel_writel(atchan, CTRLA, 0);
        channel_writel(atchan, CTRLB, 0);
        channel_writel(atchan, DSCR, atchan->save_dscr);
        dma_writel(atdma, CHER, atchan->mask);

        /* channel pause status should be removed by channel user
         * We cannot take the initiative to do it here */

        vdbg_dump_regs(atchan);
}
static int at_dma_resume_noirq(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct at_dma *atdma = platform_get_drvdata(pdev);
        struct dma_chan *chan, *_chan;

        /* bring back DMA controller */
        clk_enable(atdma->clk);
        dma_writel(atdma, EN, AT_DMA_ENABLE);

        /* clear any pending interrupt */
        while (dma_readl(atdma, EBCISR))
                cpu_relax();

        /* restore saved data */
        dma_writel(atdma, EBCIER, atdma->save_imr);
        list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
                        device_node) {
                struct at_dma_chan *atchan = to_at_dma_chan(chan);

                channel_writel(atchan, CFG, atchan->save_cfg);
                if (atc_chan_is_cyclic(atchan))
                        atc_resume_cyclic(atchan);
        }
        return 0;
}
static const struct dev_pm_ops at_dma_dev_pm_ops = {
        .prepare = at_dma_prepare,
        .suspend_noirq = at_dma_suspend_noirq,
        .resume_noirq = at_dma_resume_noirq,
};
static struct platform_driver at_dma_driver = {
        .remove         = __exit_p(at_dma_remove),
        .shutdown       = at_dma_shutdown,
        .id_table       = atdma_devtypes,
        .driver = {
                .name   = "at_hdmac",
                .pm     = &at_dma_dev_pm_ops,
                .of_match_table = of_match_ptr(atmel_dma_dt_ids),
        },
};
static int __init at_dma_init(void)
{
        return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
        platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);
MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");