// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP DMAengine support
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "../virt-dma.h"

#define OMAP_SDMA_REQUESTS	127
#define OMAP_SDMA_CHANNELS	32
struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	bool legacy;
	bool ll123_supported;
	struct dma_pool *desc_pool;
	unsigned dma_requests;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan **lch_map;
};
struct omap_chan {
	struct virt_dma_chan vc;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;
	bool running;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};
#define DESC_NXT_SV_REFRESH	(0x1 << 24)
#define DESC_NXT_SV_REUSE	(0x2 << 24)
#define DESC_NXT_DV_REFRESH	(0x1 << 26)
#define DESC_NXT_DV_REUSE	(0x2 << 26)
#define DESC_NTYPE_TYPE2	(0x2 << 29)
/* Type 2 descriptor with Source or Destination address update */
struct omap_type2_desc {
	uint32_t next_desc;
	uint32_t en;
	uint32_t addr; /* src or dst */
	uint16_t fn;
	uint16_t cicr;
	int16_t cdei;
	uint16_t csei;
	int32_t cdfi;
	int32_t csfi;
} __packed;

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
	int32_t fi;		/* for double indexing */
	int16_t ei;		/* for double indexing */

	/* Linked list */
	struct omap_type2_desc *t2_desc;
	dma_addr_t t2_desc_paddr;
};
struct omap_desc {
	struct virt_dma_desc vd;
	bool using_ll;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int32_t fi;		/* for OMAP_DMA_SYNC_PACKET / double indexing */
	int16_t ei;		/* for double indexing */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};
enum {
	CAPS_0_SUPPORT_LL123	= BIT(20),	/* Linked List type1/2/3 */
	CAPS_0_SUPPORT_LL4	= BIT(21),	/* Linked List type4 */

	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2,	/* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9,	/* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,
	CSDP_WRITE_NON_POSTED	= 0 << 16,
	CSDP_WRITE_POSTED	= 1 << 16,
	CSDP_WRITE_LAST_NON_POSTED = 2 << 16,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),

	CDP_DST_VALID_INC	= 0 << 0,
	CDP_DST_VALID_RELOAD	= 1 << 0,
	CDP_DST_VALID_REUSE	= 2 << 0,
	CDP_SRC_VALID_INC	= 0 << 2,
	CDP_SRC_VALID_RELOAD	= 1 << 2,
	CDP_SRC_VALID_REUSE	= 2 << 2,
	CDP_NTYPE_TYPE1		= 1 << 4,
	CDP_NTYPE_TYPE2		= 2 << 4,
	CDP_NTYPE_TYPE3		= 3 << 4,
	CDP_TMODE_NORMAL	= 0 << 8,
	CDP_TMODE_LLIST		= 1 << 8,
	CDP_FAST		= BIT(10),
};
static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};
static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};
static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}
static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	struct omap_desc *d = to_omap_dma_desc(&vd->tx);

	if (d->using_ll) {
		struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
		int i;

		for (i = 0; i < d->sglen; i++) {
			if (d->sg[i].t2_desc)
				dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
					      d->sg[i].t2_desc_paddr);
		}
	}

	kfree(d);
}
static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
				     enum dma_transfer_direction dir, bool last)
{
	struct omap_sg *sg = &d->sg[idx];
	struct omap_type2_desc *t2_desc = sg->t2_desc;

	if (idx)
		d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
	if (last)
		t2_desc->next_desc = 0xfffffffc;

	t2_desc->en = sg->en;
	t2_desc->addr = sg->addr;
	t2_desc->fn = sg->fn & 0xffff;
	t2_desc->cicr = d->cicr;
	if (!last)
		t2_desc->cicr &= ~CICR_BLOCK_IE;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		t2_desc->cdei = sg->ei;
		t2_desc->csei = d->ei;
		t2_desc->cdfi = sg->fi;
		t2_desc->csfi = d->fi;

		t2_desc->en |= DESC_NXT_DV_REFRESH;
		t2_desc->en |= DESC_NXT_SV_REUSE;
		break;
	case DMA_MEM_TO_DEV:
		t2_desc->cdei = d->ei;
		t2_desc->csei = sg->ei;
		t2_desc->cdfi = d->fi;
		t2_desc->csfi = sg->fi;

		t2_desc->en |= DESC_NXT_SV_REFRESH;
		t2_desc->en |= DESC_NXT_DV_REUSE;
		break;
	default:
		return;
	}

	t2_desc->en |= DESC_NTYPE_TYPE2;
}
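/*
 * Illustrative sketch (not from the hardware documentation itself): for a
 * three-entry scatterlist, omap_dma_fill_type2_desc() builds the chain
 *
 *	sg[0].t2_desc->next_desc = sg[1].t2_desc_paddr;
 *	sg[1].t2_desc->next_desc = sg[2].t2_desc_paddr;
 *	sg[2].t2_desc->next_desc = 0xfffffffc;	(end-of-list marker)
 *
 * and only the last descriptor keeps CICR_BLOCK_IE set, so a single block
 * interrupt signals completion of the whole chain.
 */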
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}
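/*
 * Worked example (illustrative): an OMAP_DMA_REG_2X16BIT write of
 * val = 0x12345678 is issued as writew_relaxed(0x5678, addr) followed by
 * writew_relaxed(0x1234, addr + 2), i.e. the low half-word goes to the
 * lower address.
 */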
static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}
static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}
static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (!dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}
static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
			    unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint16_t cicr = d->cicr;

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	if (d->using_ll) {
		uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;

		if (d->dir == DMA_DEV_TO_MEM)
			cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
		else
			cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
		omap_dma_chan_write(c, CDP, cdp);

		omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
		omap_dma_chan_write(c, CCDN, 0);
		omap_dma_chan_write(c, CCFN, 0xffff);
		omap_dma_chan_write(c, CCEN, 0xffffff);

		cicr &= ~CICR_BLOCK_IE;
	} else if (od->ll123_supported) {
		omap_dma_chan_write(c, CDP, 0);
	}

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);

	c->running = true;
}
static void omap_dma_drain_chan(struct omap_chan *c)
{
	int i;
	u32 val;

	/* Wait for sDMA FIFO to drain */
	for (i = 0; ; i++) {
		val = omap_dma_chan_read(c, CCR);
		if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
			break;

		if (i > 100)
			break;

		udelay(5);
	}

	if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
		dev_err(c->vc.chan.device->dev,
			"DMA drain did not complete on lch %d\n",
			c->dma_ch);
}
static int omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		if (!(val & CCR_ENABLE))
			return -EINVAL;

		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
	c->running = false;
	return 0;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_sg *sg = d->sg + c->sgidx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, sg->ei);
	omap_dma_chan_write(c, cxfi, sg->fi);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
	c->sgidx++;
}
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (!dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, d->ei);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d);
}
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (c->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else if (d->using_ll || c->sgidx == d->sglen) {
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		} else {
			omap_dma_start_sg(c, d);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct device *dev = od->ddev.dev;
	int ret;

	if (od->legacy) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
				       &c->dma_ch);
	}

	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!od->legacy) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!od->legacy) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
		c->dma_sig);
	c->dma_sig = 0;
}
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}
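/*
 * Worked example (illustrative): with es = CSDP_DATA_TYPE_16 (2 bytes) and
 * two sg entries of en = 8, fn = 4 each, omap_dma_desc_size() returns
 * (8 * 4 + 8 * 4) * 2 = 128 bytes.
 */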
/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}
static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}
static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far). Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!c->paused && c->running) {
		uint32_t ccr = omap_dma_chan_read(c, CCR);
		/*
		 * The channel is no longer active, set the return value
		 * accordingly.
		 */
		if (!(ccr & CCR_ENABLE))
			ret = DMA_COMPLETE;
	}

	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	if (ret == DMA_IN_PROGRESS && c->paused)
		ret = DMA_PAUSED;
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		omap_dma_start_desc(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, es, en, frame_bytes;
	bool ll_failed = false;
	u32 burst;
	u32 port_window, port_window_bytes;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		port_window = c->cfg.src_port_window_size;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		port_window = c->cfg.dst_port_window_size;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	/* When the port_window is used, one frame must cover the window */
	if (port_window) {
		burst = port_window;
		port_window_bytes = port_window * es_bytes[es];

		d->ei = 1;
		/*
		 * One frame covers the port_window and by configuring
		 * the source frame index to be -1 * (port_window - 1)
		 * we instruct the sDMA that after a frame is processed
		 * it should move back to the start of the window.
		 */
		d->fi = -(port_window_bytes - 1);
	}

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM) {
		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;

		d->ccr |= CCR_DST_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_SRC_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_SRC_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_SRC_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_SRC_BURST_16;
		} else {
			d->ccr |= CCR_SRC_AMODE_CONSTANT;
		}
	} else {
		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;

		d->ccr |= CCR_SRC_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_DST_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_DST_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_DST_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_DST_BURST_16;
		} else {
			d->ccr |= CCR_DST_AMODE_CONSTANT;
		}
	}

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp |= es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		if (port_window)
			d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;

	d->using_ll = od->ll123_supported;

	for_each_sg(sgl, sgent, sglen, i) {
		struct omap_sg *osg = &d->sg[i];

		osg->addr = sg_dma_address(sgent);
		osg->en = en;
		osg->fn = sg_dma_len(sgent) / frame_bytes;

		if (d->using_ll) {
			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
						      &osg->t2_desc_paddr);
			if (!osg->t2_desc) {
				dev_err(chan->device->dev,
					"t2_desc[%d] allocation failed\n", i);
				ll_failed = true;
				d->using_ll = false;
				continue;
			}

			omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
		}
	}

	d->sglen = sglen;

	/* Release the dma_pool entries if one allocation failed */
	if (ll_failed) {
		for (i = 0; i < d->sglen; i++) {
			struct omap_sg *osg = &d->sg[i];

			if (osg->t2_desc) {
				dma_pool_free(od->desc_pool, osg->t2_desc,
					      osg->t2_desc_paddr);
				osg->t2_desc = NULL;
			}
		}
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
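/*
 * Worked example (illustrative) for the EN/FN arithmetic above: with a
 * 4-byte bus width (es_bytes[es] = 4) and maxburst = 16, en = 16 and
 * frame_bytes = 64, so a 4096-byte scatterlist entry is programmed as
 * fn = 4096 / 64 = 64 frames.
 */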
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM) {
			d->ccr |= CCR_TRIGGER_SRC;
			d->csdp |= CSDP_DST_PACKED;
		} else {
			d->csdp |= CSDP_SRC_PACKED;
		}

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
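/*
 * Client-side sketch (not part of this driver; assumes a channel already
 * obtained via dma_request_chan() and configured with
 * dmaengine_slave_config()):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = my_period_callback;	(hypothetical callback)
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Each completed period then reaches the callback by way of
 * vchan_cyclic_callback() in omap_dma_callback() above.
 */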
static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	uint8_t data_type;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((src | dest | len));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = src;
	d->fi = 0;
	d->es = data_type;
	d->sg[0].en = len / BIT(data_type);
	d->sg[0].fn = 1;
	d->sg[0].addr = dest;
	d->sglen = 1;
	d->ccr = c->ccr;
	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
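/*
 * Worked example (illustrative): __ffs(src | dest | len) picks the largest
 * element size that all three values are aligned to. For src = 0x1000,
 * dest = 0x2004 and len = 0x100, the OR is 0x3104, __ffs() returns 2, and
 * CSDP_DATA_TYPE_32 (4-byte elements) is used.
 */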
static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	struct omap_sg *sg;
	uint8_t data_type;
	size_t src_icg, dst_icg;

	/* Slave mode is not supported */
	if (is_slave_direction(xt->dir))
		return NULL;

	if (xt->frame_size != 1 || xt->numf == 0)
		return NULL;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	sg = &d->sg[0];
	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = xt->src_start;
	d->es = data_type;
	sg->en = xt->sgl[0].size / BIT(data_type);
	sg->fn = xt->numf;
	sg->addr = xt->dst_start;
	d->sglen = 1;
	d->ccr = c->ccr;

	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	if (src_icg) {
		d->ccr |= CCR_SRC_AMODE_DBLIDX;
		d->ei = 1;
		d->fi = src_icg + 1;
	} else if (xt->src_inc) {
		d->ccr |= CCR_SRC_AMODE_POSTINC;
		d->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: SRC constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	if (dst_icg) {
		d->ccr |= CCR_DST_AMODE_DBLIDX;
		sg->ei = 1;
		sg->fi = dst_icg + 1;
	} else if (xt->dst_inc) {
		d->ccr |= CCR_DST_AMODE_POSTINC;
		sg->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: DST constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (cfg->src_maxburst > chan->device->max_burst ||
	    cfg->dst_maxburst > chan->device->max_burst)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}
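/*
 * Client-side sketch (not part of this driver): a peripheral driver
 * typically fills a struct dma_slave_config and hands it to the framework,
 * which lands here through the device_config hook:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,	(hypothetical FIFO address)
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */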
static int omap_dma_terminate_all(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	c->cyclic = false;
	c->paused = false;

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
static void omap_dma_synchronize(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_synchronize(&c->vc);
}
static int omap_dma_pause(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;
	bool can_pause = false;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (!c->desc)
		goto out;

	if (c->cyclic)
		can_pause = true;

	/*
	 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
	 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
	 * "When a channel is disabled during a transfer, the channel undergoes
	 * an abort, unless it is hardware-source-synchronized …".
	 * A source-synchronised channel is one where the fetching of data is
	 * under control of the device. In other words, a device-to-memory
	 * transfer. So, a destination-synchronised channel (which would be a
	 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
	 * bit is cleared.
	 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
	 * aborts immediately after completion of current read/write
	 * transactions and then the FIFO is cleaned up." The term "cleaned up"
	 * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
	 * are both clear _before_ disabling the channel, otherwise data loss
	 * will occur.
	 * The problem is that if the channel is active, then device activity
	 * can result in DMA activity starting between reading those as both
	 * clear and the write to DMA_CCR to clear the enable bit hitting the
	 * hardware. If the DMA hardware can't drain the data in its FIFO to the
	 * destination, then data loss "might" occur (say if we write to an UART
	 * and the UART is not accepting any further data).
	 */
	else if (c->desc->dir == DMA_DEV_TO_MEM)
		can_pause = true;

	if (can_pause && !c->paused) {
		ret = omap_dma_stop(c);
		if (!ret)
			c->paused = true;
	}
out:
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}
static int omap_dma_resume(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (c->paused && c->desc) {
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
		ret = 0;
	}
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);

	return 0;
}
static void omap_dma_free(struct omap_dmadev *od)
{
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}
#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;
	u32 lch_count;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
	od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
	od->ddev.device_config = omap_dma_slave_config;
	od->ddev.device_pause = omap_dma_pause;
	od->ddev.device_resume = omap_dma_resume;
	od->ddev.device_terminate_all = omap_dma_terminate_all;
	od->ddev.device_synchronize = omap_dma_synchronize;
	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	if (__dma_omap15xx(od->plat->dma_attr))
		od->ddev.residue_granularity =
				DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	else
		od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	/* Number of DMA requests */
	od->dma_requests = OMAP_SDMA_REQUESTS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
	}

	/* Number of available logical channels */
	if (!pdev->dev.of_node) {
		lch_count = od->plat->dma_attr->lch_count;
		if (unlikely(!lch_count))
			lch_count = OMAP_SDMA_CHANNELS;
	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
					&lch_count)) {
		dev_info(&pdev->dev,
			 "Missing dma-channels property, using %u.\n",
			 OMAP_SDMA_CHANNELS);
		lch_count = OMAP_SDMA_CHANNELS;
	}

	od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
				   GFP_KERNEL);
	if (!od->lch_map)
		return -ENOMEM;

	for (i = 0; i < od->dma_requests; i++) {
		rc = omap_dma_chan_init(od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc)
			return rc;
	}

	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
		od->ll123_supported = true;

	od->ddev.filter.map = od->plat->slave_map;
	od->ddev.filter.mapcnt = od->plat->slavecnt;
	od->ddev.filter.fn = omap_dma_filter_fn;

	if (od->ll123_supported) {
		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
						&pdev->dev,
						sizeof(struct omap_type2_desc),
						4, 0);
		if (!od->desc_pool) {
			dev_err(&pdev->dev,
				"unable to allocate descriptor pool\n");
			od->ll123_supported = false;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");

	return rc;
}
static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);
	int irq;

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	irq = platform_get_irq(pdev, 1);
	devm_free_irq(&pdev->dev, irq, od);

	dma_async_device_unregister(&od->ddev);

	if (!od->legacy) {
		/* Disable all interrupts */
		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
	}

	if (od->ll123_supported)
		dma_pool_destroy(od->desc_pool);

	omap_dma_free(od);

	return 0;
}
static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);
static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		if (req <= od->dma_requests) {
			c->dma_sig = req;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
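/*
 * Legacy (non-DT) usage sketch, not taken from this file: a client can
 * still match a channel by sDMA request line through this filter:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int req = 42;	(hypothetical request line)
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &req);
 */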
static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");