/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   fits the MPC8560, MPC8555, MPC8548, MPC8641, etc. Support for the
 *   MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"
static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}
static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}
static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}
static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}
static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}
static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);

	/* Idle means the channel is either not busy (CB clear) or halted (CH set) */
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}
static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(fsl_chan))
			break;
		udelay(10);
	}
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}
static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}
static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
		struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link to the new descriptor's physical address and enable the
	 * End-of-segment interrupt for the last link descriptor
	 * (the previous node's next link descriptor).
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}
/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Sets the source address hold (loop) transfer size. While the DMA is
 * transferring data from the source address (SA), a loop size of 4 makes
 * the DMA read from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
 * SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}
/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Sets the destination address hold (loop) transfer size. While the DMA is
 * transferring data to the destination address (TA), a loop size of 4 makes
 * the DMA write to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
 * TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}
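
/*
 * Illustrative sketch (an assumption, not part of this driver): a client
 * could hold both addresses over a 4-byte window through the hooks that
 * fsl_dma_chan_probe() wires up:
 *
 *	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
 *
 *	if (fsl_chan->set_src_loop_size)
 *		fsl_chan->set_src_loop_size(fsl_chan, 4);
 *	if (fsl_chan->set_dest_loop_size)
 *		fsl_chan->set_dest_loop_size(fsl_chan, 4);
 *
 * Only sizes 0 (disable), 1, 2, 4 and 8 take effect, per the switches above.
 */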
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @size     : Pause control size, 0 to disable external pause control.
 *             The maximum is 1024.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The pause control size is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
{
	if (size > 1024)
		return;

	if (size) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				| ((__ilog2(size) << 24) & 0x0f000000),
			32);
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	} else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel can be started by the
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately. The DMA channel waits for the control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
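
/*
 * Illustrative sketch (an assumption, not part of this driver): pairing the
 * two external-control hooks so that DREQ# paces the transfer in 1024-byte
 * bursts and the external start pin actually kicks it off. Both hooks are
 * only filled in for FSL_DMA_IP_85XX channels; see fsl_dma_chan_probe().
 *
 *	if (fsl_chan->toggle_ext_pause)
 *		fsl_chan->toggle_ext_pause(fsl_chan, 1024);
 *	if (fsl_chan->toggle_ext_start)
 *		fsl_chan->toggle_ext_start(fsl_chan, 1);
 */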
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc;
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	list_for_each_entry(desc, &tx->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		desc->async_tx.cookie = cookie;
	}

	fsl_chan->common.cookie = cookie;
	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}
/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}
/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function creates a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* Has this channel already been allocated? */
	if (fsl_chan->desc_pool)
		return 1;

	/* The descriptor must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}
/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);

	fsl_chan->desc_pool = NULL;
}
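
/* Note: the DMA_INTERRUPT descriptor prepared below is a NULL transfer; its
 * zeroed hardware descriptor leaves BCR at 0, and completion is reported via
 * the Programming Error interrupt (see fsl_dma_chan_do_interrupt()).
 */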
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->async_tx.tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	struct list_head *list;
	size_t copy;

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate a link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->async_tx.tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	list = &first->async_tx.tx_list;
	list_for_each_entry_safe_reverse(new, prev, list, node) {
		list_del(&new->node);
		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}
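
/*
 * Illustrative client-side sketch (an assumption, not part of this driver):
 * one memcpy pushed through the generic dmaengine hooks this driver
 * registers; "dev", "src_buf", "dst_buf" and the omitted error handling are
 * placeholders.
 *
 *	dma_addr_t src = dma_map_single(dev, src_buf, len, DMA_TO_DEVICE);
 *	dma_addr_t dst = dma_map_single(dev, dst_buf, len, DMA_FROM_DEVICE);
 *	struct dma_async_tx_descriptor *tx =
 *		chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	dma_cookie_t cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 *
 * Internally, a length above FSL_DMA_BCR_MAX_CNT is split into a chain of
 * link descriptors of at most FSL_DMA_BCR_MAX_CNT bytes each.
 */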
/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}
/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: each completed
 * link descriptor has its callback run and is then freed back to the
 * descriptor pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}
/**
 * fsl_chan_xfer_ld_queue - Transfer the link descriptors in the channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	if (!dma_is_idle(fsl_chan))
		goto out_unlock;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred, we need to start them.
	 */

	/* Find the first un-transferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from the ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%llx\n",
				(unsigned long long)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}
/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;

		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}
/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
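
/*
 * Illustrative polling sketch (an assumption, not part of this driver):
 * busy-waiting on a submitted cookie through the hook this driver wires to
 * fsl_dma_is_complete(); real clients would normally sleep or use a
 * completion callback instead.
 *
 *	enum dma_status status;
 *
 *	do {
 *		status = chan->device->device_is_tx_complete(chan, cookie,
 *								NULL, NULL);
 *	} while (status == DMA_IN_PROGRESS);
 */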
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* The BCR register is 0, so this is a DMA_INTERRUPT
			 * async_tx. Update the completed cookie, and continue
			 * with the next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(fsl_chan),
			(unsigned long long)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}
static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	/* Each channel owns one byte of the general status register,
	 * channel 0 in the most significant byte.
	 */
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}
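
/*
 * Worked example for the ch_nr computation above: with only channel 1
 * pending, gsr reads 0x00800000 (channel 1 owns bits 23-16); ffs(gsr) = 24,
 * so ch_nr = (32 - 24) / 8 = 1.
 */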
static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;

	fsl_chan_ld_cleanup(fsl_chan);
}
static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from the feature
	 * of its channels, report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = fdev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no %d channel!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also get the loop-size hooks */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
					"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "DMA channel %s request_irq error "
				"with return %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
		 compatible,
		 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}
static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
{
	if (fchan->irq != NO_IRQ)
		free_irq(fchan->irq, fchan);
	list_del(&fchan->common.device_node);
	iounmap(fchan->reg_base);
	kfree(fchan);
}
static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	struct fsl_dma_device *fdev;
	struct device_node *child;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at 0x%llx...\n",
			match->compatible, (unsigned long long)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.dev = &dev->dev;

	fdev->irq = irq_of_parse_and_map(dev->node, 0);
	if (fdev->irq != NO_IRQ) {
		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);

	/* We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove. Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(dev->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
	}

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}
static int of_fsl_dma_remove(struct of_device *of_dev)
{
	struct fsl_dma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&of_dev->dev);

	dma_async_device_unregister(&fdev->common);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);

	if (fdev->irq != NO_IRQ)
		free_irq(fdev->irq, fdev);

	iounmap(fdev->reg_base);

	kfree(fdev);
	dev_set_drvdata(&of_dev->dev, NULL);

	return 0;
}
static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
static struct of_platform_driver of_fsl_dma_driver = {
	.name = "fsl-elo-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
	.remove = of_fsl_dma_remove,
};
static __init int of_fsl_dma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&of_fsl_dma_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}
static void __exit of_fsl_dma_exit(void)
{
	of_unregister_platform_driver(&of_fsl_dma_driver);
}
subsys_initcall(of_fsl_dma_init);
module_exit(of_fsl_dma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");