/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE		0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR		0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1
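
/*
 * These values follow the VDMA IP's C_FLUSH_ON_FSYNC configuration and are
 * selected through the "xlnx,flush-fsync" device tree property.
 */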

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_APP_WORDS	5
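
/*
 * The app words are the user/APP fields of an AXI DMA SG descriptor;
 * xilinx_dma_prep_slave_sg() copies them from its @context pointer for
 * MEM_TO_DEV transfers.
 */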

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x - 1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x - 1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
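
/*
 * Poll a channel control register until @cond is true, waiting @delay_us
 * between reads and giving up after @timeout_us (see readl_poll_timeout()).
 */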
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver writes to register offsets that are not 64-bit
 * aligned (e.g. 0x5c), the value is written as two separate 32-bit writes
 * instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}
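
/*
 * Write a DMA address to a control register, using a 64-bit write when the
 * channel supports extended (>32 bit) addressing.
 */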
static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}
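
/*
 * Program the buffer address of an AXI DMA hardware descriptor. @sg_used is
 * the offset within the current scatterlist entry and @period_len is the
 * cyclic period offset (zero for non-cyclic transfers).
 */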
static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
		xilinx_dma_free_tx_segment(chan, chan->seg_v);
	}
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
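
/*
 * Note: the cyclic completion helper below temporarily drops chan->lock
 * around the client callback so that the callback may queue further work
 * without deadlocking on the channel lock.
 */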
/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_axidma_tx_segment),
				   __alignof__(struct xilinx_axidma_tx_segment),
				   0);
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
				   sizeof(struct xilinx_cdma_tx_segment),
				   __alignof__(struct xilinx_cdma_tx_segment),
				   0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				     chan->dev,
				     sizeof(struct xilinx_vdma_tx_segment),
				     __alignof__(struct xilinx_vdma_tx_segment),
				     0);
	}

	if (!chan->desc_pool) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/*
		 * For AXI DMA case after submitting a pending_list, keep
		 * an extra segment allocated so that the "next descriptor"
		 * pointer on the tail descriptor always points to a
		 * valid descriptor, even when paused after reaching taildesc.
		 * This way, it is possible to issue additional
		 * transfers without halting and restarting the channel.
		 */
		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);

		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain
		 * so allocating a desc segment during channel allocation for
		 * programming tail descriptor.
		 */
		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA resetting once channel will reset the
		 * other channel as well so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
		if (chan->has_sg) {
			list_for_each_entry(segment, &desc->segments, node) {
				hw = &segment->hw;
				residue += (hw->control - hw->status) &
					   XILINX_DMA_MAX_TRANS_LEN;
			}
		}
		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		dma_set_residue(txstate, chan->residue);
	}

	return ret;
}

/**
 * xilinx_dma_is_running - Check if DMA channel is running
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if running, '0' if not.
 */
static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
{
	return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		 XILINX_DMA_DMASR_HALTED) &&
		(dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
		 XILINX_DMA_DMACR_RUNSTOP);
}

/**
 * xilinx_dma_is_idle - Check if DMA channel is idle
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if idle, '0' if not.
 */
static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
{
	return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		XILINX_DMA_DMASR_IDLE;
}
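
/*
 * xilinx_vdma_start_transfer() and xilinx_dma_start_transfer() treat an SG
 * channel as busy when it is running but not idle; new tail descriptor
 * updates are then deferred until the next completion interrupt.
 */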
/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}
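
/*
 * Unlike AXI DMA and VDMA, the CDMA has no run/stop control bit, so the
 * helper below quiesces the channel by waiting for the DMASR IDLE bit.
 */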
/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/* Configure channel to allow number frame buffers */
	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
		       chan->desc_pendingcount);

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	if (config->park && (config->park_frm >= 0) &&
	    (config->park_frm < chan->num_frms)) {
		if (chan->direction == DMA_MEM_TO_DEV)
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
					XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
		else
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
					XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
	}

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		if (chan->desc_submitcount < chan->num_frms)
			i = chan->desc_submitcount;

		list_for_each_entry(segment, &desc->segments, node) {
			if (chan->ext_addr)
				vdma_desc_write_64(chan,
					XILINX_VDMA_REG_START_ADDRESS_64(i++),
					segment->hw.buf_addr,
					segment->hw.buf_addr_msb);
			else
				vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

			last = segment;
		}

		if (!last)
			return;

		/* HW expects these parameters to be same for one transaction */
		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
	}

	if (!chan->has_sg) {
		list_del(&desc->node);
		list_add_tail(&desc->node, &chan->active_list);
		chan->desc_submitcount++;
		chan->desc_pendingcount--;
		if (chan->desc_submitcount == chan->num_frms)
			chan->desc_submitcount = 0;
	} else {
		list_splice_tail_init(&chan->pending_list, &chan->active_list);
		chan->desc_pendingcount = 0;
	}
}

/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
				XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}

/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
	u32 reg;

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	if (chan->has_sg && !chan->xdev->mcdma) {
		old_head = list_first_entry(&head_desc->segments,
					    struct xilinx_axidma_tx_segment,
					    node);
		new_head = chan->seg_v;
		/* Copy Buffer Descriptor fields. */
		new_head->hw = old_head->hw;

		/* Swap and save new reserve */
		list_replace_init(&old_head->node, &new_head->node);
		chan->seg_v = old_head;

		tail_segment->hw.next_desc = chan->seg_v->phys;
		head_desc->async_tx.phys = new_head->phys;
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
			XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg && !chan->xdev->mcdma)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
				       head_desc->async_tx.phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
					       head_desc->async_tx.phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_CDESC(chan->tdest),
					head_desc->async_tx.phys);
			}
		}
	}

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg && !chan->xdev->mcdma) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
					       tail_segment->phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_TDESC(chan->tdest),
					tail_segment->phys);
			}
		}
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}

/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: xilinx DMA channel
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}

/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;

	return err;
}

/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}

/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR
		 * register, so take care not to write 1 to any other
		 * error bits.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			chan->err = true;
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;
	struct xilinx_axidma_tx_segment *axidma_tail_segment;
	struct xilinx_cdma_tx_segment *cdma_tail_segment;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_vdma_tx_segment,
					       node);
		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		cdma_tail_segment = list_last_entry(&tail_desc->segments,
						struct xilinx_cdma_tx_segment,
						node);
		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else {
		axidma_tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_axidma_tx_segment,
					       node);
		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	}

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}

/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->cyclic) {
		xilinx_dma_free_tx_descriptor(chan, desc);
		return -EBUSY;
	}

	if (chan->err) {
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	if (desc->cyclic)
		chan->cyclic = true;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment, *prev = NULL;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	prev = segment;

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	desc->async_tx.phys = segment->phys;
	hw->next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;
	prev->hw.next_desc = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA_SLAVE
 *	transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, period_len - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
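
/*
 * Usage sketch (not part of this driver): a client would typically set up a
 * cyclic transfer through the generic dmaengine API, e.g.:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = period_done;		// hypothetical callback
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */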
/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			    XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	err = chan->stop_transfer(chan);
	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	return 0;
}


/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
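
/*
 * Example (an illustrative sketch, not compiled here): a video client that
 * owns a VDMA channel could tune it before issuing transfers. The field
 * values below are assumptions, not requirements:
 *
 *	struct xilinx_vdma_config cfg = {};
 *
 *	cfg.frm_cnt_en = 1;
 *	cfg.coalesc = 2;	(interrupt every second frame)
 *	cfg.ext_fsync = 0;
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */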

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
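
/*
 * Note: the mm2s, s2mm and sg clocks are optional at synthesis time, so a
 * failed devm_clk_get() above simply clears the pointer to NULL; the clk
 * API treats a NULL clk as a no-op, which keeps the enable path uniform.
 */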

static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

/**
 * xdma_disable_allclks - Disable all clocks
 * @xdev: Driver specific device structure
 */
static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}

/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);
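
	/*
	 * Worked example (illustrative): for a 64-bit stream, width is
	 * 8 bytes and copy_align = fls(8 - 1) = 3, i.e. buffers must then
	 * be aligned to 2^3 = 8 bytes when DRE is absent.
	 */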

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}

/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
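
/*
 * Example (an illustrative sketch): a consumer whose device-tree node
 * carries "dmas"/"dma-names" properties for this controller obtains its
 * channel through the dmaengine core, which lands in the xlate callback
 * above. The channel name "axidma0" is an assumption:
 *
 *	chan = dma_request_chan(&pdev->dev, "axidma0");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */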

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
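
/*
 * The empty { } entry above terminates the match table, and
 * MODULE_DEVICE_TABLE() exports it so the module can be autoloaded when a
 * matching compatible string appears in the device tree.
 */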

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					  xilinx_dma_prep_interleaved;
		/* Residue calculation is supported only by AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");