/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16
struct edma_pset {
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	int				cyclic;
	int				absync;
	int				pset_nr;
	int				processed;
	u32				residue;
	struct edma_pset		pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	int				ctlr;
	struct dma_device		dma_slave;
	struct edma_chan		slave_chans[EDMA_CHANS];
	int				dummy_slot;
};
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
	*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	/* If either we processed all psets or we're still not started */
	if (!echan->edesc ||
	    echan->edesc->pset_nr == echan->edesc->processed) {
		/* Get next vdesc */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc) {
			echan->edesc = NULL;
			return;
		}
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j].param);
		dev_vdbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].param.opt,
			edesc->pset[j].param.src,
			edesc->pset[j].param.dst,
			edesc->pset[j].param.a_b_cnt,
			edesc->pset[j].param.ccnt,
			edesc->pset[j].param.src_dst_bidx,
			edesc->pset[j].param.src_dst_cidx,
			edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
	}

	edesc->processed += nslots;
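
	/*
	 * Worked example of the chunking above (illustrative numbers): a
	 * 45-entry SG list with MAX_NR_SG = 20 is executed as three
	 * hardware runs of 20, 20 and 5 PaRAM sets; each pass through this
	 * function programs echan->slot[0..nslots-1] with the next chunk
	 * and advances edesc->processed accordingly.
	 */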
	/*
	 * If this is the last set in a set of SG-list transactions, set up
	 * a link to the dummy slot; this results in all future events
	 * being absorbed and that's OK because we're done. Cyclic
	 * descriptors instead link back to the first slot to loop forever.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(echan->slot[nslots-1], echan->slot[1]);
		else
			edma_link(echan->slot[nslots-1],
				  echan->ecc->dummy_slot);
	}
	if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan->ch_num);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan->ch_num);
	}
	/*
	 * This happens due to setup times between intermediate transfers
	 * in long SG lists which have to be broken up into transfers of
	 * MAX_NR_SG elements.
	 */
	if (echan->missed) {
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	}
}
static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_terminate_all() returns (even if it does, it will
	 * see echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		echan->edesc = NULL;
		edma_stop(echan->ch_num);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}
static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
static int edma_dma_pause(struct edma_chan *echan)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!echan->edesc->cyclic)
		return -EINVAL;

	edma_pause(echan->ch_num);
	return 0;
}
static int edma_dma_resume(struct edma_chan *echan)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!echan->edesc->cyclic)
		return -EINVAL;

	edma_resume(echan->ch_num);
	return 0;
}
static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	case DMA_PAUSE:
		ret = edma_dma_pause(echan);
		break;
	case DMA_RESUME:
		ret = edma_dma_resume(echan);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @epset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: Width of the device's data bus, in bytes
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
	enum dma_slave_buswidth dev_width, unsigned int dma_length,
	enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;
	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, bcnt for the first frame will be the
		 * remainder below; then for every successive frame,
		 * bcnt will be SZ_64K - 1. This is ensured by setting
		 * bcntrld = 0xffff at the end of this function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
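
		/*
		 * Worked example of the arithmetic above (illustrative
		 * numbers): dma_length = 300000 and acnt = 4 give 75000
		 * elements; ccnt = 75000 / 65535 = 1 and
		 * bcnt = 75000 - 65535 = 9465, so ccnt is bumped to 2:
		 * a first frame of 9465 elements, then one bcntrld-reloaded
		 * frame of SZ_64K - 1 elements.
		 */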
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K - 1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}
	if (direction == DMA_MEM_TO_DEV) {
		/* src is memory: increment along the buffer */
		src_bidx = acnt;
		src_cidx = cidx;
		/* dst is a device fifo: fixed address */
		dst_bidx = 0;
		dst_cidx = 0;
	} else if (direction == DMA_DEV_TO_MEM) {
		/* src is a device fifo: fixed address */
		src_bidx = 0;
		src_cidx = 0;
		/* dst is memory: increment along the buffer */
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else if (direction == DMA_MEM_TO_MEM) {
		/* both ends are memory: increment both */
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}
	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
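
	/*
	 * For example, acnt = 4 and bcnt = 8 pack into
	 * a_b_cnt = 0x00080004, and a MEM_TO_DEV transfer with
	 * src_bidx = 4 and dst_bidx = 0 packs into
	 * src_dst_bidx = 0x00000004 (destination index in the upper
	 * half-word).
	 */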
	/*
	 * The only time auto-reload (bcntrld) is required is the A-sync
	 * case, where a reload value of SZ_64K - 1 is assured to be
	 * sufficient. The 'link' field is initially set to no-link
	 * (0xffff) and is later populated by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;

	return absync;
}
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n",
			__func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/*
		 * If this is the last in a current SG set of transactions,
		 * enable interrupts so that the next set is processed.
		 */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
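
/*
 * Note on the flow above (virt-dma plumbing): the descriptor returned by
 * vchan_tx_prep() is only queued. Nothing touches the hardware until the
 * client submits it with dmaengine_submit() and calls
 * dma_async_issue_pending(), which lands in edma_issue_pending() and from
 * there in edma_execute().
 */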
static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);

	if (unlikely(!echan || !len))
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = 1;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	/*
	 * Enable intermediate transfer chaining to re-trigger channel
	 * on completion of every TR, and enable transfer-completion
	 * interrupt on completion of the whole transfer.
	 */
	edesc->pset[0].param.opt |= ITCCHEN;
	edesc->pset[0].param.opt |= TCINTEN;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length must be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;
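
	/*
	 * E.g. buf_len = 4 * period_len yields nslots = 5: one PaRAM set
	 * per period plus an extra terminating set, filled below with a
	 * copy of pset[0], which lets edma_execute() close the ring by
	 * linking the last slot back to echan->slot[1].
	 */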
	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n",
			__func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = buf_len;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].param.opt,
			edesc->pset[i].param.src,
			edesc->pset[i].param.dst,
			edesc->pset[i].param.a_b_cnt,
			edesc->pset[i].param.ccnt,
			edesc->pset[i].param.src_dst_bidx,
			edesc->pset[i].param.src_dst_cidx,
			edesc->pset[i].param.link_bcntrld);

		/*
		 * Enable interrupts for every period because callback
		 * has to be called for every period.
		 */
		edesc->pset[i].param.opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	struct edmacc_param p;

	edesc = echan->edesc;

	/* Pause the channel for non-cyclic */
	if (!edesc || !edesc->cyclic)
		edma_pause(echan->ch_num);

	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		spin_lock(&echan->vchan.lock);

		if (edesc) {
			if (edesc->cyclic) {
				vchan_cyclic_callback(&edesc->vdesc);
			} else if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edesc->residue = 0;
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
				edma_execute(echan);
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
				edma_execute(echan);
			}
		}

		spin_unlock(&echan->vchan.lock);

		break;
	case EDMA_DMA_CC_ERROR:
		spin_lock(&echan->vchan.lock);

		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later based on missed flag which will be sure
		 * to happen as:
		 * (1) we finished transmitting an intermediate slot and
		 *     edma_execute is coming up.
		 * (2) or we finished current transfer and issue will
		 *     call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}

		spin_unlock(&echan->vchan.lock);

		break;
	default:
		break;
	}
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = echan->edesc->residue;
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int edma_dma_device_slave_caps(struct dma_chan *dchan,
				      struct dma_slave_caps *caps)
{
	caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	return 0;
}
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->device_slave_caps = edma_dma_device_slave_caps;
	dma->dev = dev;

	/*
	 * Code using dma memcpy must make sure the alignment of
	 * the length is at a dma->copy_align boundary.
	 */
	dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;

	INIT_LIST_HEAD(&dma->channels);
}
static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}
static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma-dma-engine",
		.owner	= THIS_MODULE,
	},
};
bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
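
/*
 * Minimal client-side usage sketch (hypothetical event number): request
 * the channel wired to a given EDMA event through this filter, e.g.:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	unsigned ch_num = EDMA_CTLR_CHAN(0, 6);
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 */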
static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
	.dma_mask = DMA_BIT_MASK(32),
};
static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
	}

	if (EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
		}
	}

out:
	return ret;
}
subsys_initcall(edma_init);
static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);
MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");