/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */

#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma	: ATmel DMA controller entity related
 * atc_ / atchan	: ATmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}
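
/*
 * Usage sketch (illustrative): a prep function chains two descriptors d0
 * and d1 (both obtained from atc_desc_get()) like this:
 *
 *	struct at_desc *first = NULL, *prev = NULL;
 *
 *	atc_desc_chain(&first, &prev, d0);	<-- first = prev = d0
 *	atc_desc_chain(&first, &prev, d1);	<-- d0->lli.dscr = d1->txd.phys
 */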

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/*
 * atc_get_current_descriptors -
 * locate the descriptor that matches the physical address in DSCR
 * @atchan: the channel to search
 * @dscr_addr: physical descriptor address in DSCR
 */
static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan,
							u32 dscr_addr)
{
	struct at_desc *desc, *_desc, *child, *desc_cur = NULL;

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (desc->lli.dscr == dscr_addr) {
			desc_cur = desc;
			break;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (child->lli.dscr == dscr_addr) {
				desc_cur = child;
				break;
			}
		}
	}

	return desc_cur;
}

/*
 * atc_get_bytes_left -
 * get the number of residual bytes in the DMA buffer
 * @chan: the channel to query
 */
static int atc_get_bytes_left(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int	chan_id = atchan->chan_common.chan_id;
	struct at_desc *desc_first = atc_first_active(atchan);
	struct at_desc *desc_cur;
	int ret = 0, count = 0;

	/*
	 * Initialize necessary values the first time.
	 * remain_desc records the remaining descriptor length.
	 */
	if (atchan->remain_desc == 0)
		/* First descriptor embeds the transaction length */
		atchan->remain_desc = desc_first->len;

	/*
	 * This happens when the current descriptor transfer is complete.
	 * The residual buffer size should be reduced by the current
	 * descriptor length.
	 */
	if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
		clear_bit(ATC_IS_BTC, &atchan->status);
		desc_cur = atc_get_current_descriptors(atchan,
						channel_readl(atchan, DSCR));
		if (!desc_cur) {
			ret = -EINVAL;
			goto out;
		}

		count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
			<< desc_first->tx_width;
		if (atchan->remain_desc < count) {
			ret = -EINVAL;
			goto out;
		}

		atchan->remain_desc -= count;
		ret = atchan->remain_desc;
	} else {
		/*
		 * Get residual bytes when current
		 * descriptor transfer in progress.
		 */
		count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
				<< (desc_first->tx_width);
		ret = atchan->remain_desc - count;
	}

	/*
	 * Check FIFO empty.
	 */
	if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
		atc_issue_pending(chan);

out:
	return ret;
}
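
/*
 * Worked example (illustrative): if the first descriptor was prepared with
 * tx_width = 2 (32-bit transfers) and CTRLA currently reads a BTSIZE of
 * 0x10, the in-flight descriptor still moves 0x10 << 2 = 64 bytes, so the
 * reported residue is atchan->remain_desc - 64.
 */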

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	dma_descriptor_unmap(txd);
	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Submit queued descriptors, if any.
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}

/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				if (pending & AT_DMA_BTC(i))
					set_bit(ATC_IS_BTC, &atchan->status);
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}
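
/*
 * Client-side sketch (illustrative, generic dmaengine API): a "txd"
 * returned by one of the prep functions below ends up here via
 *
 *	dma_cookie_t cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */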

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation destination address (DMA address)
 * @src: operation source address (DMA address)
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;
	first->tx_width = src_width;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
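
/*
 * Client-side sketch (illustrative; dst/src are DMA addresses mapped by the
 * caller):
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */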

/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;
	first->tx_width = reg_width;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}
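
/*
 * Client-side sketch (illustrative): after configuring the channel (see
 * set_runtime_config() below), a peripheral driver typically does
 *
 *	nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */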

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
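
/*
 * Example (illustrative): with reg_width = 2 (32-bit accesses), a period
 * may span at most ATC_BTSIZE_MAX << 2 bytes, and both period_len and
 * buf_addr must be multiples of 4.
 */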

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;
	first->tx_width = reg_width;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
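
/*
 * Client-side sketch (illustrative, e.g. an audio driver cycling over a
 * ring of 8 periods of 4 KiB each):
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, 8 * 4096, 4096,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = period_elapsed;		<-- hypothetical callback
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */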

static int set_runtime_config(struct dma_chan *chan,
			      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	/* Check that the channel is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}
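
/*
 * Client-side sketch (illustrative; FIFO address, width and burst depend on
 * the peripheral):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);	<-- cmd == DMA_SLAVE_CONFIG
 */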

static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
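
/*
 * Client-side sketch (illustrative): the generic dmaengine wrappers all
 * land in atc_control() above:
 *
 *	dmaengine_pause(chan);			<-- cmd == DMA_PAUSE
 *	dmaengine_resume(chan);			<-- cmd == DMA_RESUME
 *	dmaengine_terminate_all(chan);		<-- cmd == DMA_TERMINATE_ALL
 */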

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;
	enum dma_status		ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/* Get number of bytes left in the active transactions */
	bytes = atc_get_bytes_left(chan);

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}
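
/*
 * Client-side sketch (illustrative):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_debug("%u bytes still pending\n", state.residue);
 */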

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	atchan->remain_desc = 0;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;
	atchan->remain_desc = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}
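
/*
 * Client-side sketch (illustrative, non-DT board code; board_dma_slave is a
 * hypothetical struct at_dma_slave supplied by platform data):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, at_dma_filter, &board_dma_slave);
 */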

#if defined(CONFIG_OF)
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;

	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
	/*
	 * We have to translate the value we get from the device tree since
	 * the half FIFO configuration value had to be 0 to keep backward
	 * compatibility.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan)
		return NULL;

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif
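
/*
 * Device-tree sketch (illustrative; peripheral ID 7 is a made-up example):
 * a client node passes two cells, the first packing the memory (bits 16-23)
 * and peripheral (bits 0-7) AHB interfaces, the second the peripheral ID
 * and FIFO configuration decoded above:
 *
 *	dmas = <&dma0 1 AT91_DMA_CFG_PER_ID(7)>;
 *	dma-names = "tx";
 */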

/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	err = clk_prepare_enable(atdma->clk);
	if (err)
		goto err_clk_prepare;

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->dma_desc_pool);
err_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
err_clk_prepare:
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* Channel should be paused by user;
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");