Merge tag 'dmaengine-4.8-rc1' of git://git.infradead.org/users/vkoul/slave-dma
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 28 Jul 2016 22:45:17 +0000 (15:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 28 Jul 2016 22:45:17 +0000 (15:45 -0700)
Pull dmaengine updates from Vinod Koul:
 "This time we have bit of largish changes: two new drivers, bunch of
  updates and cleanups to existing set.  Nothing super exciting though.

  New drivers:
   - Xilinx zynqmp dma engine driver
   - Marvell xor2 driver

  Updates:
   - dmatest sg support
   - updates and enhancements to the Xilinx drivers, adding cyclic mode
   - clock handling fixes across drivers
   - removal of OOM error messages on kzalloc failure across the subsystem
   - interleaved transfers support in omap driver
   - runtime pm support in qcom bam dma
   - tasklet kill/cleanup on remove across drivers
   - irq cleanup on remove across drivers"

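As a rough illustration of the "tasklet kill" and "irq cleanup on remove"
items above, here is a minimal sketch of the kind of remove() cleanup this
series adds. The driver name, struct layout and field names below are
hypothetical and not taken from any single patch; the kernel APIs used
(devm_free_irq, tasklet_kill, dma_async_device_unregister,
clk_disable_unprepare) are the real ones the patches call:

	#include <linux/clk.h>
	#include <linux/dmaengine.h>
	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	/* Hypothetical per-device state; real drivers keep more here. */
	struct foo_dma_dev {
		struct dma_device	slave;
		struct tasklet_struct	task;
		struct clk		*clk;
		unsigned int		irq;
	};

	static int foo_dma_remove(struct platform_device *pdev)
	{
		struct foo_dma_dev *d = platform_get_drvdata(pdev);

		/* Release the IRQ so the handler can no longer run. */
		devm_free_irq(&pdev->dev, d->irq, d);

		/* Wait for any scheduled or running tasklet to finish. */
		tasklet_kill(&d->task);

		/* Only then tear down the dmaengine registration and clocks. */
		dma_async_device_unregister(&d->slave);
		clk_disable_unprepare(d->clk);

		return 0;
	}

The individual patches in the log below apply this same idea to their own
per-channel tasklets and IRQ bookkeeping, in whatever order each driver's
probe() set things up.
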
* tag 'dmaengine-4.8-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (94 commits)
  dmaengine: k3dma: add missing clk_disable_unprepare() on error in k3_dma_probe()
  dmaengine: zynqmp_dma: add missing MODULE_LICENSE
  dmaengine: qcom_hidma: use for_each_matching_node() macro
  dmaengine: zynqmp_dma: Fix static checker warning
  dmaengine: omap-dma: Support for interleaved transfer
  dmaengine: ioat: statify symbol
  dmaengine: pxa_dma: implement device_synchronize
  dmaengine: imx-sdma: remove assignment never used
  dmaengine: imx-sdma: remove dummy assignment
  dmaengine: cppi: remove unused and bogus check
  dmaengine: qcom_hidma_lli: kill the tasklets upon exit
  dmaengine: pxa_dma: remove owner assignment
  dmaengine: fsl_raid: remove owner assignment
  dmaengine: coh901318: remove owner assignment
  dmaengine: qcom_hidma: kill the tasklets upon exit
  dmaengine: txx9dmac: explicitly freeup irq
  dmaengine: sirf-dma: kill the tasklets upon exit
  dmaengine: s3c24xx: kill the tasklets upon exit
  dmaengine: s3c24xx: explicitly freeup irq
  dmaengine: pl330: explicitly freeup irq
  ...

54 files changed:
Documentation/devicetree/bindings/dma/mv-xor-v2.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt [deleted file]
Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt [new file with mode: 0644]
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/amba-pl08x.c
drivers/dma/at_xdmac.c
drivers/dma/bcm2835-dma.c
drivers/dma/bestcomm/bestcomm.c
drivers/dma/coh901318.c
drivers/dma/cppi41.c
drivers/dma/dma-axi-dmac.c
drivers/dma/dma-jz4740.c
drivers/dma/dmatest.c
drivers/dma/edma.c
drivers/dma/fsl-edma.c
drivers/dma/fsl_raid.c
drivers/dma/fsldma.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/ioat/init.c
drivers/dma/k3dma.c
drivers/dma/mmp_pdma.c
drivers/dma/mmp_tdma.c
drivers/dma/moxart-dma.c
drivers/dma/mpc512x_dma.c
drivers/dma/mv_xor.c
drivers/dma/mv_xor_v2.c [new file with mode: 0644]
drivers/dma/nbpfaxi.c
drivers/dma/omap-dma.c
drivers/dma/pl330.c
drivers/dma/ppc4xx/adma.c
drivers/dma/pxa_dma.c
drivers/dma/qcom/bam_dma.c
drivers/dma/qcom/hidma.c
drivers/dma/qcom/hidma_ll.c
drivers/dma/qcom/hidma_mgmt.c
drivers/dma/s3c24xx-dma.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/sh/shdmac.c
drivers/dma/sh/sudmac.c
drivers/dma/sirf-dma.c
drivers/dma/ste_dma40.c
drivers/dma/ste_dma40_ll.c
drivers/dma/sun6i-dma.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/ti-dma-crossbar.c
drivers/dma/timb_dma.c
drivers/dma/txx9dmac.c
drivers/dma/xilinx/Makefile
drivers/dma/xilinx/xilinx_dma.c [new file with mode: 0644]
drivers/dma/xilinx/xilinx_vdma.c [deleted file]
drivers/dma/xilinx/zynqmp_dma.c [new file with mode: 0644]

diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
new file mode 100644 (file)
index 0000000..217a90e
--- /dev/null
@@ -0,0 +1,24 @@
+* Marvell XOR v2 engines
+
+Required properties:
+- compatible: one of the following values:
+    "marvell,armada-7k-xor"
+    "marvell,xor-v2"
+- reg: Should contain registers location and length (two sets)
+    the first set is the DMA registers
+    the second set is the global registers
+- msi-parent: Phandle to the MSI-capable interrupt controller used for
+  interrupts.
+
+Optional properties:
+- clocks: Optional reference to the clock used by the XOR engine.
+
+Example:
+
+       xor0@400000 {
+               compatible = "marvell,xor-v2";
+               reg = <0x400000 0x1000>,
+                     <0x410000 0x1000>;
+               msi-parent = <&gic_v2m0>;
+               dma-coherent;
+       };
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index 3cf0072d3141962342f56df2f251ffbeb094879d..a2b8bfaec43cc12654175f22494e3ddbd8f818cf 100644 (file)
@@ -1,46 +1,96 @@
+Xilinx AXI VDMA engine, it does transfers between memory and video devices.
+It can be configured to have one channel or two channels. If configured
+as two channels, one is to transmit to the video device and another is
+to receive from the video device.
+
 Xilinx AXI DMA engine, it does transfers between memory and AXI4 stream
 target devices. It can be configured to have one channel or two channels.
 If configured as two channels, one is to transmit to the device and another
 is to receive from the device.
 
+Xilinx AXI CDMA engine, it does transfers between memory-mapped source
+address and a memory-mapped destination address.
+
 Required properties:
-- compatible: Should be "xlnx,axi-dma-1.00.a"
+- compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or
+             "xlnx,axi-cdma-1.00.a""
 - #dma-cells: Should be <1>, see "dmas" property below
-- reg: Should contain DMA registers location and length.
+- reg: Should contain VDMA registers location and length.
+- xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits).
+- dma-ranges: Should be as the following <dma_addr cpu_addr max_len>.
 - dma-channel child node: Should have at least one channel and can have up to
        two channels per device. This node specifies the properties of each
        DMA channel (see child node properties below).
+- clocks: Input clock specifier. Refer to common clock bindings.
+- clock-names: List of input clocks
+       For VDMA:
+       Required elements: "s_axi_lite_aclk"
+       Optional elements: "m_axi_mm2s_aclk" "m_axi_s2mm_aclk",
+                          "m_axis_mm2s_aclk", "s_axis_s2mm_aclk"
+       For CDMA:
+       Required elements: "s_axi_lite_aclk", "m_axi_aclk"
+       FOR AXIDMA:
+       Required elements: "s_axi_lite_aclk"
+       Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
+                          "m_axi_sg_aclk"
+
+Required properties for VDMA:
+- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
 
 Optional properties:
-- xlnx,include-sg: Tells whether configured for Scatter-mode in
+- xlnx,include-sg: Tells configured for Scatter-mode in
        the hardware.
+Optional properties for AXI DMA:
+- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware.
+Optional properties for VDMA:
+- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
+       It takes following values:
+       {1}, flush both channels
+       {2}, flush mm2s channel
+       {3}, flush s2mm channel
 
 Required child node properties:
-- compatible: It should be either "xlnx,axi-dma-mm2s-channel" or
+- compatible:
+       For VDMA: It should be either "xlnx,axi-vdma-mm2s-channel" or
+       "xlnx,axi-vdma-s2mm-channel".
+       For CDMA: It should be "xlnx,axi-cdma-channel".
+       For AXIDMA: It should be either "xlnx,axi-dma-mm2s-channel" or
        "xlnx,axi-dma-s2mm-channel".
-- interrupts: Should contain per channel DMA interrupts.
+- interrupts: Should contain per channel VDMA interrupts.
 - xlnx,datawidth: Should contain the stream data width, take values
        {32,64...1024}.
 
-Option child node properties:
-- xlnx,include-dre: Tells whether hardware is configured for Data
+Optional child node properties:
+- xlnx,include-dre: Tells hardware is configured for Data
        Realignment Engine.
+Optional child node properties for VDMA:
+- xlnx,genlock-mode: Tells Genlock synchronization is
+       enabled/disabled in hardware.
+Optional child node properties for AXI DMA:
+-dma-channels: Number of dma channels in child node.
 
 Example:
 ++++++++
 
-axi_dma_0: axidma@40400000 {
-       compatible = "xlnx,axi-dma-1.00.a";
+axi_vdma_0: axivdma@40030000 {
+       compatible = "xlnx,axi-vdma-1.00.a";
        #dma_cells = <1>;
-       reg = < 0x40400000 0x10000 >;
-       dma-channel@40400000 {
-               compatible = "xlnx,axi-dma-mm2s-channel";
-               interrupts = < 0 59 4 >;
+       reg = < 0x40030000 0x10000 >;
+       dma-ranges = <0x00000000 0x00000000 0x40000000>;
+       xlnx,num-fstores = <0x8>;
+       xlnx,flush-fsync = <0x1>;
+       xlnx,addrwidth = <0x20>;
+       clocks = <&clk 0>, <&clk 1>, <&clk 2>, <&clk 3>, <&clk 4>;
+       clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
+                     "m_axis_mm2s_aclk", "s_axis_s2mm_aclk";
+       dma-channel@40030000 {
+               compatible = "xlnx,axi-vdma-mm2s-channel";
+               interrupts = < 0 54 4 >;
                xlnx,datawidth = <0x40>;
        } ;
-       dma-channel@40400030 {
-               compatible = "xlnx,axi-dma-s2mm-channel";
-               interrupts = < 0 58 4 >;
+       dma-channel@40030030 {
+               compatible = "xlnx,axi-vdma-s2mm-channel";
+               interrupts = < 0 53 4 >;
                xlnx,datawidth = <0x40>;
        } ;
 } ;
@@ -49,7 +99,7 @@ axi_dma_0: axidma@40400000 {
 * DMA client
 
 Required properties:
-- dmas: a list of <[DMA device phandle] [Channel ID]> pairs,
+- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
        where Channel ID is '0' for write/tx and '1' for read/rx
        channel.
 - dma-names: a list of DMA channel names, one per "dmas" entry
@@ -57,9 +107,9 @@ Required properties:
 Example:
 ++++++++
 
-dmatest_0: dmatest@0 {
-       compatible ="xlnx,axi-dma-test-1.00.a";
-       dmas = <&axi_dma_0 0
-               &axi_dma_0 1>;
-       dma-names = "dma0", "dma1";
+vdmatest_0: vdmatest@0 {
+       compatible ="xlnx,axi-vdma-test-1.00.a";
+       dmas = <&axi_vdma_0 0
+               &axi_vdma_0 1>;
+       dma-names = "vdma0", "vdma1";
 } ;
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt
deleted file mode 100644 (file)
index a1f2683..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-Xilinx AXI VDMA engine, it does transfers between memory and video devices.
-It can be configured to have one channel or two channels. If configured
-as two channels, one is to transmit to the video device and another is
-to receive from the video device.
-
-Xilinx AXI DMA engine, it does transfers between memory and AXI4 stream
-target devices. It can be configured to have one channel or two channels.
-If configured as two channels, one is to transmit to the device and another
-is to receive from the device.
-
-Xilinx AXI CDMA engine, it does transfers between memory-mapped source
-address and a memory-mapped destination address.
-
-Required properties:
-- compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or
-             "xlnx,axi-cdma-1.00.a""
-- #dma-cells: Should be <1>, see "dmas" property below
-- reg: Should contain VDMA registers location and length.
-- xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits).
-- dma-ranges: Should be as the following <dma_addr cpu_addr max_len>.
-- dma-channel child node: Should have at least one channel and can have up to
-       two channels per device. This node specifies the properties of each
-       DMA channel (see child node properties below).
-- clocks: Input clock specifier. Refer to common clock bindings.
-- clock-names: List of input clocks
-       For VDMA:
-       Required elements: "s_axi_lite_aclk"
-       Optional elements: "m_axi_mm2s_aclk" "m_axi_s2mm_aclk",
-                          "m_axis_mm2s_aclk", "s_axis_s2mm_aclk"
-       For CDMA:
-       Required elements: "s_axi_lite_aclk", "m_axi_aclk"
-       FOR AXIDMA:
-       Required elements: "s_axi_lite_aclk"
-       Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
-                          "m_axi_sg_aclk"
-
-Required properties for VDMA:
-- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
-
-Optional properties:
-- xlnx,include-sg: Tells configured for Scatter-mode in
-       the hardware.
-Optional properties for VDMA:
-- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
-       It takes following values:
-       {1}, flush both channels
-       {2}, flush mm2s channel
-       {3}, flush s2mm channel
-
-Required child node properties:
-- compatible: It should be either "xlnx,axi-vdma-mm2s-channel" or
-       "xlnx,axi-vdma-s2mm-channel".
-- interrupts: Should contain per channel VDMA interrupts.
-- xlnx,datawidth: Should contain the stream data width, take values
-       {32,64...1024}.
-
-Optional child node properties:
-- xlnx,include-dre: Tells hardware is configured for Data
-       Realignment Engine.
-Optional child node properties for VDMA:
-- xlnx,genlock-mode: Tells Genlock synchronization is
-       enabled/disabled in hardware.
-
-Example:
-++++++++
-
-axi_vdma_0: axivdma@40030000 {
-       compatible = "xlnx,axi-vdma-1.00.a";
-       #dma_cells = <1>;
-       reg = < 0x40030000 0x10000 >;
-       dma-ranges = <0x00000000 0x00000000 0x40000000>;
-       xlnx,num-fstores = <0x8>;
-       xlnx,flush-fsync = <0x1>;
-       xlnx,addrwidth = <0x20>;
-       clocks = <&clk 0>, <&clk 1>, <&clk 2>, <&clk 3>, <&clk 4>;
-       clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
-                     "m_axis_mm2s_aclk", "s_axis_s2mm_aclk";
-       dma-channel@40030000 {
-               compatible = "xlnx,axi-vdma-mm2s-channel";
-               interrupts = < 0 54 4 >;
-               xlnx,datawidth = <0x40>;
-       } ;
-       dma-channel@40030030 {
-               compatible = "xlnx,axi-vdma-s2mm-channel";
-               interrupts = < 0 53 4 >;
-               xlnx,datawidth = <0x40>;
-       } ;
-} ;
-
-
-* DMA client
-
-Required properties:
-- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
-       where Channel ID is '0' for write/tx and '1' for read/rx
-       channel.
-- dma-names: a list of DMA channel names, one per "dmas" entry
-
-Example:
-++++++++
-
-vdmatest_0: vdmatest@0 {
-       compatible ="xlnx,axi-vdma-test-1.00.a";
-       dmas = <&axi_vdma_0 0
-               &axi_vdma_0 1>;
-       dma-names = "vdma0", "vdma1";
-} ;
diff --git a/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt
new file mode 100644 (file)
index 0000000..a784cdd
--- /dev/null
@@ -0,0 +1,27 @@
+Xilinx ZynqMP DMA engine, it does support memory to memory transfers,
+memory to device and device to memory transfers. It also has flow
+control and rate control support for slave/peripheral dma access.
+
+Required properties:
+- compatible           : Should be "xlnx,zynqmp-dma-1.0"
+- reg                  : Memory map for gdma/adma module access.
+- interrupt-parent     : Interrupt controller the interrupt is routed through
+- interrupts           : Should contain DMA channel interrupt.
+- xlnx,bus-width       : Axi buswidth in bits. Should contain 128 or 64
+- clock-names          : List of input clocks "clk_main", "clk_apb"
+                         (see clock bindings for details)
+
+Optional properties:
+- dma-coherent         : Present if dma operations are coherent.
+
+Example:
+++++++++
+fpd_dma_chan1: dma@fd500000 {
+       compatible = "xlnx,zynqmp-dma-1.0";
+       reg = <0x0 0xFD500000 0x1000>;
+       interrupt-parent = <&gic>;
+       interrupts = <0 117 4>;
+       clock-names = "clk_main", "clk_apb";
+       xlnx,bus-width = <128>;
+       dma-coherent;
+};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 8c98779a12b134f3688e1076f1e11211c23e2623..739f797b40d9843b7d75261e955439866c7aaa83 100644 (file)
@@ -339,6 +339,20 @@ config MV_XOR
        ---help---
          Enable support for the Marvell XOR engine.
 
+config MV_XOR_V2
+       bool "Marvell XOR engine version 2 support "
+       depends on ARM64
+       select DMA_ENGINE
+       select DMA_ENGINE_RAID
+       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+       select GENERIC_MSI_IRQ_DOMAIN
+       ---help---
+         Enable support for the Marvell version 2 XOR engine.
+
+         This engine provides acceleration for copy, XOR and RAID6
+         operations, and is available on Marvell Armada 7K and 8K
+         platforms.
+
 config MXS_DMA
        bool "MXS DMA support"
        depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL
@@ -519,19 +533,31 @@ config XGENE_DMA
        help
          Enable support for the APM X-Gene SoC DMA engine.
 
-config XILINX_VDMA
-       tristate "Xilinx AXI VDMA Engine"
+config XILINX_DMA
+       tristate "Xilinx AXI DMAS Engine"
        depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
        select DMA_ENGINE
        help
          Enable support for Xilinx AXI VDMA Soft IP.
 
-         This engine provides high-bandwidth direct memory access
+         AXI VDMA engine provides high-bandwidth direct memory access
          between memory and AXI4-Stream video type target
          peripherals including peripherals which support AXI4-
          Stream Video Protocol.  It has two stream interfaces/
          channels, Memory Mapped to Stream (MM2S) and Stream to
          Memory Mapped (S2MM) for the data transfers.
+         AXI CDMA engine provides high-bandwidth direct memory access
+         between a memory-mapped source address and a memory-mapped
+         destination address.
+         AXI DMA engine provides high-bandwidth one dimensional direct
+         memory access between memory and AXI4-Stream target peripherals.
+
+config XILINX_ZYNQMP_DMA
+       tristate "Xilinx ZynqMP DMA Engine"
+       depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
+       select DMA_ENGINE
+       help
+         Enable support for Xilinx ZynqMP DMA controller.
 
 config ZX_DMA
        tristate "ZTE ZX296702 DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 614f28b0b739de875823b05a66a62186a1e9f45f..e4dc9cac7ee8427184f821896364c232634a444f 100644 (file)
@@ -45,6 +45,7 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
 obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o
 obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 81db1c4811ceef4f71e8c0c1e5adada3aec07de1..939a7c31f7605928b1a6b1a95591e683f1ab664e 100644 (file)
@@ -1443,8 +1443,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
        dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
        if (!dsg) {
                pl08x_free_txd(pl08x, txd);
-               dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
-                               __func__);
                return NULL;
        }
        list_add_tail(&dsg->node, &txd->dsg_list);
@@ -1901,11 +1899,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
         */
        for (i = 0; i < channels; i++) {
                chan = kzalloc(sizeof(*chan), GFP_KERNEL);
-               if (!chan) {
-                       dev_err(&pl08x->adev->dev,
-                               "%s no memory for channel\n", __func__);
+               if (!chan)
                        return -ENOMEM;
-               }
 
                chan->host = pl08x;
                chan->state = PL08X_CHAN_IDLE;
@@ -2360,9 +2355,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
        pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
                        GFP_KERNEL);
        if (!pl08x->phy_chans) {
-               dev_err(&adev->dev, "%s failed to allocate "
-                       "physical channel holders\n",
-                       __func__);
                ret = -ENOMEM;
                goto out_no_phychans;
        }
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 75bd6621dc5d7fb068ca6f4dd24d71bfa56ad640..e434ffe7bc5c57cfd22f7b9f163f0819a1294981 100644 (file)
@@ -456,7 +456,7 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
        return desc;
 }
 
-void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
 {
        memset(&desc->lld, 0, sizeof(desc->lld));
        INIT_LIST_HEAD(&desc->descs_list);
@@ -1195,14 +1195,14 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
        desc->lld.mbr_cfg = chan_cc;
 
        dev_dbg(chan2dev(chan),
-               "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
-               __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
+               "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+               __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
                desc->lld.mbr_cfg);
 
        return desc;
 }
 
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
                         size_t len, unsigned long flags)
 {
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6149b27c33ad0962a19ed411055d9d05bfb252d3..e18dc596cf2447fa9ef7e41b62d9396e29043426 100644 (file)
@@ -393,11 +393,12 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
        unsigned int sg_len)
 {
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-       size_t max_len = bcm2835_dma_max_frame_length(c);
-       unsigned int i, len;
+       size_t len, max_len;
+       unsigned int i;
        dma_addr_t addr;
        struct scatterlist *sgent;
 
+       max_len = bcm2835_dma_max_frame_length(c);
        for_each_sg(sgl, sgent, sg_len, i) {
                for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent);
                     len > 0;
@@ -613,7 +614,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan)
        spin_unlock_irqrestore(&c->vc.lock, flags);
 }
 
-struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
 {
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 180fedb418cc1983720072bb6a40b05ff9ea3a82..7ce843723003037495c3af240f20513d1099f6bb 100644 (file)
@@ -397,8 +397,6 @@ static int mpc52xx_bcom_probe(struct platform_device *op)
        /* Get a clean struct */
        bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
        if (!bcom_eng) {
-               printk(KERN_ERR DRIVER_NAME ": "
-                       "Can't allocate state structure\n");
                rv = -ENOMEM;
                goto error_sramclean;
        }
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index c340ca9bd2b5dc953e6373f85314714836f8cf83..e4acd63e42aa37a9048b91a69f8351b444acc498 100644 (file)
@@ -266,7 +266,7 @@ static int dma_memcpy_channels[] = {
                        COH901318_CX_CTRL_DDMA_LEGACY | \
                        COH901318_CX_CTRL_PRDD_SOURCE)
 
-const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
+static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
        {
                .number = U300_DMA_MSL_TX_0,
                .name = "MSL TX 0",
@@ -1280,6 +1280,7 @@ struct coh901318_desc {
 struct coh901318_base {
        struct device *dev;
        void __iomem *virtbase;
+       unsigned int irq;
        struct coh901318_pool pool;
        struct powersave pm;
        struct dma_device dma_slave;
@@ -1364,7 +1365,6 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
 }
 
 static const struct file_operations coh901318_debugfs_status_operations = {
-       .owner          = THIS_MODULE,
        .open           = simple_open,
        .read           = coh901318_debugfs_read,
        .llseek         = default_llseek,
@@ -2422,7 +2422,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        enum dma_status ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_COMPLETE)
+       if (ret == DMA_COMPLETE || !txstate)
                return ret;
 
        dma_set_residue(txstate, coh901318_get_bytes_left(chan));
@@ -2680,6 +2680,8 @@ static int __init coh901318_probe(struct platform_device *pdev)
        if (err)
                return err;
 
+       base->irq = irq;
+
        err = coh901318_pool_create(&base->pool, &pdev->dev,
                                    sizeof(struct coh901318_lli),
                                    32);
@@ -2755,11 +2757,31 @@ static int __init coh901318_probe(struct platform_device *pdev)
        coh901318_pool_destroy(&base->pool);
        return err;
 }
+static void coh901318_base_remove(struct coh901318_base *base, const int *pick_chans)
+{
+       int chans_i;
+       int i = 0;
+       struct coh901318_chan *cohc;
+
+       for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
+               for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
+                       cohc = &base->chans[i];
+
+                       tasklet_kill(&cohc->tasklet);
+               }
+       }
+
+}
 
 static int coh901318_remove(struct platform_device *pdev)
 {
        struct coh901318_base *base = platform_get_drvdata(pdev);
 
+       devm_free_irq(&pdev->dev, base->irq, base);
+
+       coh901318_base_remove(base, dma_slave_channels);
+       coh901318_base_remove(base, dma_memcpy_channels);
+
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&base->dma_memcpy);
        dma_async_device_unregister(&base->dma_slave);
@@ -2780,13 +2802,13 @@ static struct platform_driver coh901318_driver = {
        },
 };
 
-int __init coh901318_init(void)
+static int __init coh901318_init(void)
 {
        return platform_driver_probe(&coh901318_driver, coh901318_probe);
 }
 subsys_initcall(coh901318_init);
 
-void __exit coh901318_exit(void)
+static void __exit coh901318_exit(void)
 {
        platform_driver_unregister(&coh901318_driver);
 }
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index ceedafbd23e01fcda6968fb04707ae141cff3a88..4b2317426c8e3851fc172002ddc50be1bcf69135 100644 (file)
@@ -497,16 +497,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
        struct cppi41_desc *d;
        struct scatterlist *sg;
        unsigned int i;
-       unsigned int num;
 
-       num = 0;
        d = c->desc;
        for_each_sg(sgl, sg, sg_len, i) {
                u32 addr;
                u32 len;
 
                /* We need to use more than one desc once musb supports sg */
-               BUG_ON(num > 0);
                addr = lower_32_bits(sg_dma_address(sg));
                len = sg_dma_len(sg);
 
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index c3468094393ebe4f74d6bf9d8f482f169ac7b6cf..7f0b9aa158679aca780d86890426339f48586f4b 100644 (file)
@@ -270,6 +270,9 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
        unsigned int pending;
 
        pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
+       if (!pending)
+               return IRQ_NONE;
+
        axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);
 
        spin_lock(&dmac->chan.vchan.lock);
@@ -579,7 +582,9 @@ static int axi_dmac_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        dmac->irq = platform_get_irq(pdev, 0);
-       if (dmac->irq <= 0)
+       if (dmac->irq < 0)
+               return dmac->irq;
+       if (dmac->irq == 0)
                return -EINVAL;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -683,6 +688,7 @@ static const struct of_device_id axi_dmac_of_match_table[] = {
        { .compatible = "adi,axi-dmac-1.00.a" },
        { },
 };
+MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);
 
 static struct platform_driver axi_dmac_driver = {
        .driver = {
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 7638b24ce8d0dac2672da6d4e83ce9956f054607..9689b36c005ac2bc6485acf9e2f7ef4269d2cb39 100644 (file)
@@ -573,12 +573,26 @@ err_unregister:
        return ret;
 }
 
+static void jz4740_cleanup_vchan(struct dma_device *dmadev)
+{
+       struct jz4740_dmaengine_chan *chan, *_chan;
+
+       list_for_each_entry_safe(chan, _chan,
+                               &dmadev->channels, vchan.chan.device_node) {
+               list_del(&chan->vchan.chan.device_node);
+               tasklet_kill(&chan->vchan.task);
+       }
+}
+
+
 static int jz4740_dma_remove(struct platform_device *pdev)
 {
        struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
        int irq = platform_get_irq(pdev, 0);
 
        free_irq(irq, dmadev);
+
+       jz4740_cleanup_vchan(&dmadev->ddev);
        dma_async_device_unregister(&dmadev->ddev);
        clk_disable_unprepare(dmadev->clk);
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index b8576fd6bd0e544730bfc07a19ce92b21ab1aba6..1245db5438e1b010cefc7a79a426f363f487ee7c 100644 (file)
@@ -51,6 +51,16 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(iterations,
                "Iterations before stopping test (default: infinite)");
 
+static unsigned int sg_buffers = 1;
+module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(sg_buffers,
+               "Number of scatter gather buffers (default: 1)");
+
+static unsigned int dmatest = 1;
+module_param(dmatest, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dmatest,
+               "dmatest 0-memcpy 1-slave_sg (default: 1)");
+
 static unsigned int xor_sources = 3;
 module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(xor_sources,
@@ -431,6 +441,8 @@ static int dmatest_func(void *data)
        dev = chan->device;
        if (thread->type == DMA_MEMCPY)
                src_cnt = dst_cnt = 1;
+       else if (thread->type == DMA_SG)
+               src_cnt = dst_cnt = sg_buffers;
        else if (thread->type == DMA_XOR) {
                /* force odd to ensure dst = src */
                src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
@@ -485,6 +497,8 @@ static int dmatest_func(void *data)
                dma_addr_t *dsts;
                unsigned int src_off, dst_off, len;
                u8 align = 0;
+               struct scatterlist tx_sg[src_cnt];
+               struct scatterlist rx_sg[src_cnt];
 
                total_tests++;
 
@@ -577,10 +591,22 @@ static int dmatest_func(void *data)
                        um->bidi_cnt++;
                }
 
+               sg_init_table(tx_sg, src_cnt);
+               sg_init_table(rx_sg, src_cnt);
+               for (i = 0; i < src_cnt; i++) {
+                       sg_dma_address(&rx_sg[i]) = srcs[i];
+                       sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
+                       sg_dma_len(&tx_sg[i]) = len;
+                       sg_dma_len(&rx_sg[i]) = len;
+               }
+
                if (thread->type == DMA_MEMCPY)
                        tx = dev->device_prep_dma_memcpy(chan,
                                                         dsts[0] + dst_off,
                                                         srcs[0], len, flags);
+               else if (thread->type == DMA_SG)
+                       tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
+                                                    rx_sg, src_cnt, flags);
                else if (thread->type == DMA_XOR)
                        tx = dev->device_prep_dma_xor(chan,
                                                      dsts[0] + dst_off,
@@ -748,6 +774,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
 
        if (type == DMA_MEMCPY)
                op = "copy";
+       else if (type == DMA_SG)
+               op = "sg";
        else if (type == DMA_XOR)
                op = "xor";
        else if (type == DMA_PQ)
@@ -802,9 +830,19 @@ static int dmatest_add_channel(struct dmatest_info *info,
        INIT_LIST_HEAD(&dtc->threads);
 
        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
-               cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
-               thread_count += cnt > 0 ? cnt : 0;
+               if (dmatest == 0) {
+                       cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
+                       thread_count += cnt > 0 ? cnt : 0;
+               }
        }
+
+       if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
+               if (dmatest == 1) {
+                       cnt = dmatest_add_threads(info, dtc, DMA_SG);
+                       thread_count += cnt > 0 ? cnt : 0;
+               }
+       }
+
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(info, dtc, DMA_XOR);
                thread_count += cnt > 0 ? cnt : 0;
@@ -877,6 +915,7 @@ static void run_threaded_test(struct dmatest_info *info)
 
        request_channels(info, DMA_MEMCPY);
        request_channels(info, DMA_XOR);
+       request_channels(info, DMA_SG);
        request_channels(info, DMA_PQ);
 }
 
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 8181ed1313865015a169b1e19e44de5ad3df8eb5..3d277fa76c1abb7397071ef58350a091e943ba4a 100644 (file)
@@ -239,6 +239,9 @@ struct edma_cc {
        bool                            chmap_exist;
        enum dma_event_q                default_queue;
 
+       unsigned int                    ccint;
+       unsigned int                    ccerrint;
+
        /*
         * The slot_inuse bit for each PaRAM slot is clear unless the slot is
         * in use by Linux or if it is allocated to be used by DSP.
@@ -1069,10 +1072,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 
        edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
                        GFP_ATOMIC);
-       if (!edesc) {
-               dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
+       if (!edesc)
                return NULL;
-       }
 
        edesc->pset_nr = sg_len;
        edesc->residue = 0;
@@ -1114,14 +1115,17 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                edesc->absync = ret;
                edesc->residue += sg_dma_len(sg);
 
-               /* If this is the last in a current SG set of transactions,
-                  enable interrupts so that next set is processed */
-               if (!((i+1) % MAX_NR_SG))
-                       edesc->pset[i].param.opt |= TCINTEN;
-
-               /* If this is the last set, enable completion interrupt flag */
                if (i == sg_len - 1)
+                       /* Enable completion interrupt */
                        edesc->pset[i].param.opt |= TCINTEN;
+               else if (!((i+1) % MAX_NR_SG))
+                       /*
+                        * Enable early completion interrupt for the
+                        * intermediateset. In this case the driver will be
+                        * notified when the paRAM set is submitted to TC. This
+                        * will allow more time to set up the next set of slots.
+                        */
+                       edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
        }
        edesc->residue_stat = edesc->residue;
 
@@ -1173,10 +1177,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 
        edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
                        GFP_ATOMIC);
-       if (!edesc) {
-               dev_dbg(dev, "Failed to allocate a descriptor\n");
+       if (!edesc)
                return NULL;
-       }
 
        edesc->pset_nr = nslots;
        edesc->residue = edesc->residue_stat = len;
@@ -1298,10 +1300,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 
        edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
                        GFP_ATOMIC);
-       if (!edesc) {
-               dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
+       if (!edesc)
                return NULL;
-       }
 
        edesc->cyclic = 1;
        edesc->pset_nr = nslots;
@@ -2207,10 +2207,8 @@ static int edma_probe(struct platform_device *pdev)
                return ret;
 
        ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
-       if (!ecc) {
-               dev_err(dev, "Can't allocate controller\n");
+       if (!ecc)
                return -ENOMEM;
-       }
 
        ecc->dev = dev;
        ecc->id = pdev->id;
@@ -2288,6 +2286,7 @@ static int edma_probe(struct platform_device *pdev)
                        dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
                        return ret;
                }
+               ecc->ccint = irq;
        }
 
        irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
@@ -2303,6 +2302,7 @@ static int edma_probe(struct platform_device *pdev)
                        dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
                        return ret;
                }
+               ecc->ccerrint = irq;
        }
 
        ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
@@ -2393,11 +2393,27 @@ err_reg1:
        return ret;
 }
 
+static void edma_cleanupp_vchan(struct dma_device *dmadev)
+{
+       struct edma_chan *echan, *_echan;
+
+       list_for_each_entry_safe(echan, _echan,
+                       &dmadev->channels, vchan.chan.device_node) {
+               list_del(&echan->vchan.chan.device_node);
+               tasklet_kill(&echan->vchan.task);
+       }
+}
+
 static int edma_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct edma_cc *ecc = dev_get_drvdata(dev);
 
+       devm_free_irq(dev, ecc->ccint, ecc);
+       devm_free_irq(dev, ecc->ccerrint, ecc);
+
+       edma_cleanupp_vchan(&ecc->dma_slave);
+
        if (dev->of_node)
                of_dma_controller_free(dev->of_node);
        dma_async_device_unregister(&ecc->dma_slave);
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index be2e62b879481f592aa965d4fe165b4ae8771896..6775f2c74e25b7269417bbe001adfb03698dea97 100644 (file)
@@ -852,6 +852,25 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
        return 0;
 }
 
+static void fsl_edma_irq_exit(
+               struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+       if (fsl_edma->txirq == fsl_edma->errirq) {
+               devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+       } else {
+               devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+               devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
+       }
+}
+
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+{
+       int i;
+
+       for (i = 0; i < DMAMUX_NR; i++)
+               clk_disable_unprepare(fsl_edma->muxclk[i]);
+}
+
 static int fsl_edma_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -897,6 +916,10 @@ static int fsl_edma_probe(struct platform_device *pdev)
 
                ret = clk_prepare_enable(fsl_edma->muxclk[i]);
                if (ret) {
+                       /* disable only clks which were enabled on error */
+                       for (; i >= 0; i--)
+                               clk_disable_unprepare(fsl_edma->muxclk[i]);
+
                        dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
                        return ret;
                }
@@ -951,14 +974,18 @@ static int fsl_edma_probe(struct platform_device *pdev)
 
        ret = dma_async_device_register(&fsl_edma->dma_dev);
        if (ret) {
-               dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
+               dev_err(&pdev->dev,
+                       "Can't register Freescale eDMA engine. (%d)\n", ret);
+               fsl_disable_clocks(fsl_edma);
                return ret;
        }
 
        ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
        if (ret) {
-               dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
+               dev_err(&pdev->dev,
+                       "Can't register Freescale eDMA of_dma. (%d)\n", ret);
                dma_async_device_unregister(&fsl_edma->dma_dev);
+               fsl_disable_clocks(fsl_edma);
                return ret;
        }
 
@@ -968,17 +995,27 @@ static int fsl_edma_probe(struct platform_device *pdev)
        return 0;
 }
 
+static void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
+{
+       struct fsl_edma_chan *chan, *_chan;
+
+       list_for_each_entry_safe(chan, _chan,
+                               &dmadev->channels, vchan.chan.device_node) {
+               list_del(&chan->vchan.chan.device_node);
+               tasklet_kill(&chan->vchan.task);
+       }
+}
+
 static int fsl_edma_remove(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
-       int i;
 
+       fsl_edma_irq_exit(pdev, fsl_edma);
+       fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
        of_dma_controller_free(np);
        dma_async_device_unregister(&fsl_edma->dma_dev);
-
-       for (i = 0; i < DMAMUX_NR; i++)
-               clk_disable_unprepare(fsl_edma->muxclk[i]);
+       fsl_disable_clocks(fsl_edma);
 
        return 0;
 }
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 4d9470f16552ca8065e69832dc786364d6369814..aad167eaaee80b29e9386574c183533d0e8f0d83 100644 (file)
@@ -337,7 +337,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq(
 
        re_chan = container_of(chan, struct fsl_re_chan, chan);
        if (len > FSL_RE_MAX_DATA_LEN) {
-               dev_err(re_chan->dev, "genq tx length %lu, max length %d\n",
+               dev_err(re_chan->dev, "genq tx length %zu, max length %d\n",
                        len, FSL_RE_MAX_DATA_LEN);
                return NULL;
        }
@@ -424,7 +424,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq(
 
        re_chan = container_of(chan, struct fsl_re_chan, chan);
        if (len > FSL_RE_MAX_DATA_LEN) {
-               dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n",
+               dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n",
                        len, FSL_RE_MAX_DATA_LEN);
                return NULL;
        }
@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy(
        re_chan = container_of(chan, struct fsl_re_chan, chan);
 
        if (len > FSL_RE_MAX_DATA_LEN) {
-               dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n",
+               dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n",
                        len, FSL_RE_MAX_DATA_LEN);
                return NULL;
        }
@@ -856,6 +856,8 @@ static int fsl_re_probe(struct platform_device *ofdev)
 
 static void fsl_re_remove_chan(struct fsl_re_chan *chan)
 {
+       tasklet_kill(&chan->irqtask);
+
        dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
                      chan->inb_phys_addr);
 
@@ -890,7 +892,6 @@ static struct of_device_id fsl_re_ids[] = {
 static struct platform_driver fsl_re_driver = {
        .driver = {
                .name = "fsl-raideng",
-               .owner = THIS_MODULE,
                .of_match_table = fsl_re_ids,
        },
        .probe = fsl_re_probe,
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index a8828ed639b3027c476fc7d82d4d6607221b2ae1..911b7177eb50487dc53269ac705c9c25dc6f8638 100644 (file)
@@ -1234,7 +1234,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
        /* alloc channel */
        chan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan) {
-               dev_err(fdev->dev, "no free memory for DMA channels!\n");
                err = -ENOMEM;
                goto out_return;
        }
@@ -1340,7 +1339,6 @@ static int fsldma_of_probe(struct platform_device *op)
 
        fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
        if (!fdev) {
-               dev_err(&op->dev, "No enough memory for 'priv'\n");
                err = -ENOMEM;
                goto out_return;
        }
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 48d85f8b95fe1e97a014a47062e3ebcf27afd314..a960608c0a4db0cc918df3c95d4c6b0a2867d406 100644 (file)
@@ -167,6 +167,7 @@ struct imxdma_channel {
        u32                             ccr_to_device;
        bool                            enabled_2d;
        int                             slot_2d;
+       unsigned int                    irq;
 };
 
 enum imx_dma_type {
@@ -186,6 +187,9 @@ struct imxdma_engine {
        struct imx_dma_2d_config        slots_2d[IMX_DMA_2D_SLOTS];
        struct imxdma_channel           channel[IMX_DMA_CHANNELS];
        enum imx_dma_type               devtype;
+       unsigned int                    irq;
+       unsigned int                    irq_err;
+
 };
 
 struct imxdma_filter_data {
@@ -1048,7 +1052,7 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
 }
 
 static int __init imxdma_probe(struct platform_device *pdev)
-       {
+{
        struct imxdma_engine *imxdma;
        struct resource *res;
        const struct of_device_id *of_id;
@@ -1100,6 +1104,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
                        dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
                        goto disable_dma_ahb_clk;
                }
+               imxdma->irq = irq;
 
                irq_err = platform_get_irq(pdev, 1);
                if (irq_err < 0) {
@@ -1113,6 +1118,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
                        dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
                        goto disable_dma_ahb_clk;
                }
+               imxdma->irq_err = irq_err;
        }
 
        /* enable DMA module */
@@ -1150,6 +1156,8 @@ static int __init imxdma_probe(struct platform_device *pdev)
                                         irq + i, i);
                                goto disable_dma_ahb_clk;
                        }
+
+                       imxdmac->irq = irq + i;
                        init_timer(&imxdmac->watchdog);
                        imxdmac->watchdog.function = &imxdma_watchdog;
                        imxdmac->watchdog.data = (unsigned long)imxdmac;
@@ -1217,10 +1225,31 @@ disable_dma_ipg_clk:
        return ret;
 }
 
+static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
+{
+       int i;
+
+       if (is_imx1_dma(imxdma)) {
+               disable_irq(imxdma->irq);
+               disable_irq(imxdma->irq_err);
+       }
+
+       for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+               struct imxdma_channel *imxdmac = &imxdma->channel[i];
+
+               if (!is_imx1_dma(imxdma))
+                       disable_irq(imxdmac->irq);
+
+               tasklet_kill(&imxdmac->dma_tasklet);
+       }
+}
+
 static int imxdma_remove(struct platform_device *pdev)
 {
        struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
 
+       imxdma_free_irq(pdev, imxdma);
+
         dma_async_device_unregister(&imxdma->dma_device);
 
        if (pdev->dev.of_node)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 0f6fd42f55ca41975b12dbe267ef3aabdb81c017..03ec76fc22ff191423bc354e5b5005a621d83412 100644 (file)
@@ -18,6 +18,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/bitops.h>
@@ -385,6 +386,7 @@ struct sdma_engine {
        const struct sdma_driver_data   *drvdata;
        u32                             spba_start_addr;
        u32                             spba_end_addr;
+       unsigned int                    irq;
 };
 
 static struct sdma_driver_data sdma_imx31 = {
@@ -571,28 +573,20 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 static int sdma_run_channel0(struct sdma_engine *sdma)
 {
        int ret;
-       unsigned long timeout = 500;
+       u32 reg;
 
        sdma_enable_channel(sdma, 0);
 
-       while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
-               if (timeout-- <= 0)
-                       break;
-               udelay(1);
-       }
-
-       if (ret) {
-               /* Clear the interrupt status */
-               writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
-       } else {
+       ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
+                                               reg, !(reg & 1), 1, 500);
+       if (ret)
                dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
-       }
 
        /* Set bits of CONFIG register with dynamic context switching */
        if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
                writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
 
-       return ret ? 0 : -ETIMEDOUT;
+       return ret;
 }
 
 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
@@ -727,9 +721,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
        unsigned long stat;
 
        stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
-       /* not interested in channel 0 interrupts */
-       stat &= ~1;
        writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+       /* channel 0 is special and not handled here, see run_channel0() */
+       stat &= ~1;
 
        while (stat) {
                int channel = fls(stat) - 1;
@@ -758,7 +752,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
         * These are needed once we start to support transfers between
         * two peripherals or memory-to-memory transfers
         */
-       int per_2_per = 0, emi_2_emi = 0;
+       int per_2_per = 0;
 
        sdmac->pc_from_device = 0;
        sdmac->pc_to_device = 0;
@@ -766,7 +760,6 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 
        switch (peripheral_type) {
        case IMX_DMATYPE_MEMORY:
-               emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
                break;
        case IMX_DMATYPE_DSP:
                emi_2_per = sdma->script_addrs->bp_2_ap_addr;
@@ -999,8 +992,6 @@ static int sdma_config_channel(struct dma_chan *chan)
                } else
                        __set_bit(sdmac->event_id0, sdmac->event_mask);
 
-               /* Watermark Level */
-               sdmac->watermark_level |= sdmac->watermark_level;
                /* Address */
                sdmac->shp_addr = sdmac->per_address;
                sdmac->per_addr = sdmac->per_address2;
@@ -1715,6 +1706,8 @@ static int sdma_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       sdma->irq = irq;
+
        sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
        if (!sdma->script_addrs)
                return -ENOMEM;
@@ -1840,6 +1833,7 @@ static int sdma_remove(struct platform_device *pdev)
        struct sdma_engine *sdma = platform_get_drvdata(pdev);
        int i;
 
+       devm_free_irq(&pdev->dev, sdma->irq, sdma);
        dma_async_device_unregister(&sdma->dma_device);
        kfree(sdma->script_addrs);
        /* Kill the tasklet */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index d406056e889246d1ec8e03dab74d0727046fd192..7145f7716a925e57d4190ceaf20dd9d13f3571bd 100644 (file)
@@ -1212,7 +1212,7 @@ static void ioat_shutdown(struct pci_dev *pdev)
        ioat_disable_interrupts(ioat_dma);
 }
 
-void ioat_resume(struct ioatdma_device *ioat_dma)
+static void ioat_resume(struct ioatdma_device *ioat_dma)
 {
        struct ioatdma_chan *ioat_chan;
        u32 chanerr;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 1ba2fd73852d273ccf9f8df0e6e6cfc0144fb90d..39de8980128c1c0ae2092110f0a4138e0a6a4bab 100644 (file)
@@ -102,6 +102,7 @@ struct k3_dma_dev {
        struct clk              *clk;
        u32                     dma_channels;
        u32                     dma_requests;
+       unsigned int            irq;
 };
 
 #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
@@ -425,10 +426,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
 
        num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
-       if (!ds) {
-               dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+       if (!ds)
                return NULL;
-       }
+
        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
        ds->size = len;
        ds->desc_num = num;
@@ -481,10 +481,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
        }
 
        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
-       if (!ds) {
-               dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+       if (!ds)
                return NULL;
-       }
+
        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
        ds->desc_num = num;
        num = 0;
@@ -705,6 +704,8 @@ static int k3_dma_probe(struct platform_device *op)
        if (ret)
                return ret;
 
+       d->irq = irq;
+
        /* init phy channel */
        d->phy = devm_kzalloc(&op->dev,
                d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
@@ -759,7 +760,7 @@ static int k3_dma_probe(struct platform_device *op)
 
        ret = dma_async_device_register(&d->slave);
        if (ret)
-               return ret;
+               goto dma_async_register_fail;
 
        ret = of_dma_controller_register((&op->dev)->of_node,
                                        k3_of_dma_simple_xlate, d);
@@ -776,6 +777,8 @@ static int k3_dma_probe(struct platform_device *op)
 
 of_dma_register_fail:
        dma_async_device_unregister(&d->slave);
+dma_async_register_fail:
+       clk_disable_unprepare(d->clk);
        return ret;
 }
 
@@ -787,6 +790,8 @@ static int k3_dma_remove(struct platform_device *op)
        dma_async_device_unregister(&d->slave);
        of_dma_controller_free((&op->dev)->of_node);
 
+       devm_free_irq(&op->dev, d->irq, d);
+
        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 56f1fd68b6205af0de5c6f55687305c42de0f1e9..f4b25fb0d0402668bafad206b4576bcf808005cb 100644 (file)
@@ -931,6 +931,25 @@ static void dma_do_tasklet(unsigned long data)
 static int mmp_pdma_remove(struct platform_device *op)
 {
        struct mmp_pdma_device *pdev = platform_get_drvdata(op);
+       struct mmp_pdma_phy *phy;
+       int i, irq = 0, irq_num = 0;
+
+
+       for (i = 0; i < pdev->dma_channels; i++) {
+               if (platform_get_irq(op, i) > 0)
+                       irq_num++;
+       }
+
+       if (irq_num != pdev->dma_channels) {
+               irq = platform_get_irq(op, 0);
+               devm_free_irq(&op->dev, irq, pdev);
+       } else {
+               for (i = 0; i < pdev->dma_channels; i++) {
+                       phy = &pdev->phy[i];
+                       irq = platform_get_irq(op, i);
+                       devm_free_irq(&op->dev, irq, phy);
+               }
+       }
 
        dma_async_device_unregister(&pdev->device);
        return 0;
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 3df0422607d594f995b455782db4f7badced0fc4..b3441f57a3640d74f268637a7f9a4941d5d7daca 100644 (file)
@@ -404,7 +404,7 @@ static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
        return;
 }
 
-struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
+static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
 {
        struct gen_pool *gpool;
        int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
@@ -551,10 +551,9 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
 
        /* alloc channel */
        tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
-       if (!tdmac) {
-               dev_err(tdev->dev, "no free memory for DMA channels!\n");
+       if (!tdmac)
                return -ENOMEM;
-       }
+
        if (irq)
                tdmac->irq = irq;
        tdmac->dev         = tdev->dev;
@@ -593,7 +592,7 @@ static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
        return true;
 }
 
-struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
+static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
                               struct of_dma *ofdma)
 {
        struct mmp_tdma_device *tdev = ofdma->of_dma_data;
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 631c4435e075f25d39c6041a523710495d1bfd8d..a6e642792e5a34f1bb46e738e416a49cd3fcc442 100644 (file)
@@ -148,6 +148,7 @@ struct moxart_chan {
 struct moxart_dmadev {
        struct dma_device               dma_slave;
        struct moxart_chan              slave_chans[APB_DMA_MAX_CHANNEL];
+       unsigned int                    irq;
 };
 
 struct moxart_filter_data {
@@ -574,10 +575,8 @@ static int moxart_probe(struct platform_device *pdev)
        struct moxart_dmadev *mdc;
 
        mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
-       if (!mdc) {
-               dev_err(dev, "can't allocate DMA container\n");
+       if (!mdc)
                return -ENOMEM;
-       }
 
        irq = irq_of_parse_and_map(node, 0);
        if (irq == NO_IRQ) {
@@ -617,6 +616,7 @@ static int moxart_probe(struct platform_device *pdev)
                dev_err(dev, "devm_request_irq failed\n");
                return ret;
        }
+       mdc->irq = irq;
 
        ret = dma_async_device_register(&mdc->dma_slave);
        if (ret) {
@@ -640,6 +640,8 @@ static int moxart_remove(struct platform_device *pdev)
 {
        struct moxart_dmadev *m = platform_get_drvdata(pdev);
 
+       devm_free_irq(&pdev->dev, m->irq, m);
+
        dma_async_device_unregister(&m->dma_slave);
 
        if (pdev->dev.of_node)
index ccadafa51d5ed940842030c13b22f4900d8b160f..fa86592c7ae1d80b717a710a31a81f0f618b2acc 100644 (file)
@@ -1110,6 +1110,7 @@ static int mpc_dma_remove(struct platform_device *op)
        }
        free_irq(mdma->irq, mdma);
        irq_dispose_mapping(mdma->irq);
+       tasklet_kill(&mdma->tasklet);
 
        return 0;
 }
index d0446a75990aeb046acb8d50a329bfeb7a9999c6..f4c9f98ec35e51ac0ba46ae07ea7cb214d2988b3 100644 (file)
@@ -1057,7 +1057,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 
 err_free_irq:
        free_irq(mv_chan->irq, mv_chan);
- err_free_dma:
+err_free_dma:
        dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
                          mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
        return ERR_PTR(ret);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
new file mode 100644 (file)
index 0000000..a28a01f
--- /dev/null
@@ -0,0 +1,878 @@
+/*
+ * Copyright (C) 2015-2016 Marvell International Ltd.
+ *
+ * This program is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include "dmaengine.h"
+
+/* DMA Engine Registers */
+#define MV_XOR_V2_DMA_DESQ_BALR_OFF                    0x000
+#define MV_XOR_V2_DMA_DESQ_BAHR_OFF                    0x004
+#define MV_XOR_V2_DMA_DESQ_SIZE_OFF                    0x008
+#define MV_XOR_V2_DMA_DESQ_DONE_OFF                    0x00C
+#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK         0x7FFF
+#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT                0
+#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK                0x1FFF
+#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT       16
+#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF                  0x010
+#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK           0x3F3F
+#define   MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE      0x202
+#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE            0x3C3C
+#define MV_XOR_V2_DMA_IMSG_CDAT_OFF                    0x014
+#define MV_XOR_V2_DMA_IMSG_THRD_OFF                    0x018
+#define   MV_XOR_V2_DMA_IMSG_THRD_MASK                 0x7FFF
+#define   MV_XOR_V2_DMA_IMSG_THRD_SHIFT                        0x0
+#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF                  0x01C
+  /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
+#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF                   0x04C
+#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK          0xFFFF
+#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT         16
+#define MV_XOR_V2_DMA_IMSG_BALR_OFF                    0x050
+#define MV_XOR_V2_DMA_IMSG_BAHR_OFF                    0x054
+#define MV_XOR_V2_DMA_DESQ_CTRL_OFF                    0x100
+#define          MV_XOR_V2_DMA_DESQ_CTRL_32B                   1
+#define   MV_XOR_V2_DMA_DESQ_CTRL_128B                 7
+#define MV_XOR_V2_DMA_DESQ_STOP_OFF                    0x800
+#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF                 0x804
+#define MV_XOR_V2_DMA_DESQ_ADD_OFF                     0x808
+
+/* XOR Global registers */
+#define MV_XOR_V2_GLOB_BW_CTRL                         0x4
+#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT     0
+#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL       64
+#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT     8
+#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL       8
+#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT    12
+#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL      4
+#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT    16
+#define          MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL       4
+#define MV_XOR_V2_GLOB_PAUSE                           0x014
+#define   MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL                0x8
+#define MV_XOR_V2_GLOB_SYS_INT_CAUSE                   0x200
+#define MV_XOR_V2_GLOB_SYS_INT_MASK                    0x204
+#define MV_XOR_V2_GLOB_MEM_INT_CAUSE                   0x220
+#define MV_XOR_V2_GLOB_MEM_INT_MASK                    0x224
+
+#define MV_XOR_V2_MIN_DESC_SIZE                                32
+#define MV_XOR_V2_EXT_DESC_SIZE                                128
+
+#define MV_XOR_V2_DESC_RESERVED_SIZE                   12
+#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE                        12
+
+#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF               8
+
+/*
+ * Descriptors queue size. With 32-byte descriptors, up to 2^14
+ * descriptors are allowed; with 128-byte descriptors, up to 2^12.
+ * This driver uses 128-byte descriptors, and experimentation has
+ * shown that a set of 1024 descriptors is sufficient to reach a
+ * good level of performance.
+ */
+#define MV_XOR_V2_DESC_NUM                             1024
+
+/**
+ * struct mv_xor_v2_descriptor - DMA HW descriptor
+ * @desc_id: used by S/W and is not affected by H/W.
+ * @flags: error and status flags
+ * @crc32_result: CRC32 calculation result
+ * @desc_ctrl: operation mode and control flags
+ * @buff_size: amount of bytes to be processed
+ * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
+ * AW-Attributes
+ * @data_buff_addr: Source (and might be RAID6 destination)
+ * addresses of data buffers in RAID5 and RAID6
+ * @reserved: reserved
+ */
+struct mv_xor_v2_descriptor {
+       u16 desc_id;
+       u16 flags;
+       u32 crc32_result;
+       u32 desc_ctrl;
+
+       /* Definitions for desc_ctrl */
+#define DESC_NUM_ACTIVE_D_BUF_SHIFT    22
+#define DESC_OP_MODE_SHIFT             28
+#define DESC_OP_MODE_NOP               0       /* Idle operation */
+#define DESC_OP_MODE_MEMCPY            1       /* Pure-DMA operation */
+#define DESC_OP_MODE_MEMSET            2       /* Mem-Fill operation */
+#define DESC_OP_MODE_MEMINIT           3       /* Mem-Init operation */
+#define DESC_OP_MODE_MEM_COMPARE       4       /* Mem-Compare operation */
+#define DESC_OP_MODE_CRC32             5       /* CRC32 calculation */
+#define DESC_OP_MODE_XOR               6       /* RAID5 (XOR) operation */
+#define DESC_OP_MODE_RAID6             7       /* RAID6 P&Q-generation */
+#define DESC_OP_MODE_RAID6_REC         8       /* RAID6 Recovery */
+#define DESC_Q_BUFFER_ENABLE           BIT(16)
+#define DESC_P_BUFFER_ENABLE           BIT(17)
+#define DESC_IOD                       BIT(27)
+
+       u32 buff_size;
+       u32 fill_pattern_src_addr[4];
+       u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
+       u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
+};
+
+/**
+ * struct mv_xor_v2_device - implements a xor device
+ * @lock: lock for the engine
+ * @dma_base: memory mapped DMA register base
+ * @glob_base: memory mapped global register base
+ * @irq_tasklet: tasklet used to handle descriptor completion
+ * @free_sw_desc: linked list of free SW descriptors
+ * @dmadev: dma device
+ * @dmachan: dma channel
+ * @hw_desq: HW descriptors queue
+ * @hw_desq_virt: virtual address of DESCQ
+ * @sw_desq: SW descriptors queue
+ * @desc_size: HW descriptor size
+ * @npendings: number of pending descriptors (for which tx_submit has
+ * been called, but not yet issue_pending)
+ */
+struct mv_xor_v2_device {
+       spinlock_t lock;
+       void __iomem *dma_base;
+       void __iomem *glob_base;
+       struct clk *clk;
+       struct tasklet_struct irq_tasklet;
+       struct list_head free_sw_desc;
+       struct dma_device dmadev;
+       struct dma_chan dmachan;
+       dma_addr_t hw_desq;
+       struct mv_xor_v2_descriptor *hw_desq_virt;
+       struct mv_xor_v2_sw_desc *sw_desq;
+       int desc_size;
+       unsigned int npendings;
+};
+
+/**
+ * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
+ * @idx: descriptor index
+ * @async_tx: support for the async_tx api
+ * @hw_desc: associated HW descriptor
+ * @free_list: node of the free SW descriptors list
+ */
+struct mv_xor_v2_sw_desc {
+       int idx;
+       struct dma_async_tx_descriptor async_tx;
+       struct mv_xor_v2_descriptor hw_desc;
+       struct list_head free_list;
+};
+
+/*
+ * Fill the data buffer addresses into a HW descriptor
+ */
+static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
+                                       struct mv_xor_v2_descriptor *desc,
+                                       dma_addr_t src, int index)
+{
+       int arr_index = ((index >> 1) * 3);
+
+       /*
+        * Fill the buffer's addresses to the descriptor.
+        *
+        * The format of the buffers address for 2 sequential buffers
+        * X and X + 1:
+        *
+        *  First word:  Buffer-DX-Address-Low[31:0]
+        *  Second word: Buffer-DX+1-Address-Low[31:0]
+        *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
+        *               DX-Buffer-Address-High[47:32] [15:0]
+        */
+       if ((index & 0x1) == 0) {
+               desc->data_buff_addr[arr_index] = lower_32_bits(src);
+
+               desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
+               desc->data_buff_addr[arr_index + 2] |=
+                       upper_32_bits(src) & 0xFFFF;
+       } else {
+               desc->data_buff_addr[arr_index + 1] =
+                       lower_32_bits(src);
+
+               desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
+               desc->data_buff_addr[arr_index + 2] |=
+                       (upper_32_bits(src) & 0xFFFF) << 16;
+       }
+}
+
+/*
+ * Return the next available index in the DESQ.
+ */
+static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
+{
+       /* read the index for the next available descriptor in the DESQ */
+       u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
+
+       return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
+               & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
+}
+
+/*
+ * notify the engine of new descriptors, and update the available index.
+ */
+static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
+                                      int num_of_desc)
+{
+       /* write the number of new descriptors in the DESQ. */
+       writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
+}
+
+/*
+ * free HW descriptors
+ */
+static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
+                                         int num_of_desc)
+{
+       /* write the number of completed descriptors to release from the DESQ. */
+       writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
+}
+
+/*
+ * Set descriptor size
+ * Return the HW descriptor size in bytes
+ */
+static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
+{
+       writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
+              xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);
+
+       return MV_XOR_V2_EXT_DESC_SIZE;
+}
+
+/*
+ * Set the IMSG threshold
+ */
+static inline
+void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
+{
+       u32 reg;
+
+       reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+
+       reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+       reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+
+       writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+}
+
+static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
+{
+       struct mv_xor_v2_device *xor_dev = data;
+       unsigned int ndescs;
+       u32 reg;
+
+       reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
+
+       ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
+                 MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
+
+       /* No descriptors to process */
+       if (!ndescs)
+               return IRQ_NONE;
+
+       /*
+        * Update IMSG threshold, to disable new IMSG interrupts until
+        * end of the tasklet
+        */
+       mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
+
+       /* schedule a tasklet to handle descriptors callbacks */
+       tasklet_schedule(&xor_dev->irq_tasklet);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * submit a descriptor to the DMA engine
+ */
+static dma_cookie_t
+mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       int desq_ptr;
+       void *dest_hw_desc;
+       dma_cookie_t cookie;
+       struct mv_xor_v2_sw_desc *sw_desc =
+               container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
+       struct mv_xor_v2_device *xor_dev =
+               container_of(tx->chan, struct mv_xor_v2_device, dmachan);
+
+       dev_dbg(xor_dev->dmadev.dev,
+               "%s sw_desc %p: async_tx %p\n",
+               __func__, sw_desc, &sw_desc->async_tx);
+
+       /* assign cookie */
+       spin_lock_bh(&xor_dev->lock);
+       cookie = dma_cookie_assign(tx);
+
+       /* get the next available slot in the DESQ */
+       desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
+
+       /* copy the HW descriptor from the SW descriptor to the DESQ */
+       dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+
+       memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
+
+       xor_dev->npendings++;
+
+       spin_unlock_bh(&xor_dev->lock);
+
+       return cookie;
+}
+
+/*
+ * Prepare a SW descriptor
+ */
+static struct mv_xor_v2_sw_desc *
+mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
+{
+       struct mv_xor_v2_sw_desc *sw_desc;
+
+       /* Lock the channel */
+       spin_lock_bh(&xor_dev->lock);
+
+       if (list_empty(&xor_dev->free_sw_desc)) {
+               spin_unlock_bh(&xor_dev->lock);
+               /* schedule tasklet to free some descriptors */
+               tasklet_schedule(&xor_dev->irq_tasklet);
+               return NULL;
+       }
+
+       /* get a free SW descriptor from the SW DESQ */
+       sw_desc = list_first_entry(&xor_dev->free_sw_desc,
+                                  struct mv_xor_v2_sw_desc, free_list);
+       list_del(&sw_desc->free_list);
+
+       /* Release the channel */
+       spin_unlock_bh(&xor_dev->lock);
+
+       /* set the async tx descriptor */
+       dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
+       sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+       async_tx_ack(&sw_desc->async_tx);
+
+       return sw_desc;
+}
+
+/*
+ * Prepare a HW descriptor for a memcpy operation
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+                         dma_addr_t src, size_t len, unsigned long flags)
+{
+       struct mv_xor_v2_sw_desc *sw_desc;
+       struct mv_xor_v2_descriptor *hw_descriptor;
+       struct mv_xor_v2_device *xor_dev;
+
+       xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);
+
+       dev_dbg(xor_dev->dmadev.dev,
+               "%s len: %zu src %pad dest %pad flags: %ld\n",
+               __func__, len, &src, &dest, flags);
+
+       sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+
+       sw_desc->async_tx.flags = flags;
+
+       /* set the HW descriptor */
+       hw_descriptor = &sw_desc->hw_desc;
+
+       /* save the SW descriptor ID to restore when operation is done */
+       hw_descriptor->desc_id = sw_desc->idx;
+
+       /* Set the MEMCPY control word */
+       hw_descriptor->desc_ctrl =
+               DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;
+
+       if (flags & DMA_PREP_INTERRUPT)
+               hw_descriptor->desc_ctrl |= DESC_IOD;
+
+       /* Set source address */
+       hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
+       hw_descriptor->fill_pattern_src_addr[1] =
+               upper_32_bits(src) & 0xFFFF;
+
+       /* Set Destination address */
+       hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
+       hw_descriptor->fill_pattern_src_addr[3] =
+               upper_32_bits(dest) & 0xFFFF;
+
+       /* Set buffers size */
+       hw_descriptor->buff_size = len;
+
+       /* return the async tx descriptor */
+       return &sw_desc->async_tx;
+}
+
+/*
+ * Prepare a HW descriptor for a XOR operation
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+                      unsigned int src_cnt, size_t len, unsigned long flags)
+{
+       struct mv_xor_v2_sw_desc *sw_desc;
+       struct mv_xor_v2_descriptor *hw_descriptor;
+       struct mv_xor_v2_device *xor_dev =
+               container_of(chan, struct mv_xor_v2_device, dmachan);
+       int i;
+
+       if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
+               return NULL;
+
+       dev_dbg(xor_dev->dmadev.dev,
+               "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
+               __func__, src_cnt, len, &dest, flags);
+
+       sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+
+       sw_desc->async_tx.flags = flags;
+
+       /* set the HW descriptor */
+       hw_descriptor = &sw_desc->hw_desc;
+
+       /* save the SW descriptor ID to restore when operation is done */
+       hw_descriptor->desc_id = sw_desc->idx;
+
+       /* Set the XOR control word */
+       hw_descriptor->desc_ctrl =
+               DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
+       hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;
+
+       if (flags & DMA_PREP_INTERRUPT)
+               hw_descriptor->desc_ctrl |= DESC_IOD;
+
+       /* Set the data buffers */
+       for (i = 0; i < src_cnt; i++)
+               mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);
+
+       hw_descriptor->desc_ctrl |=
+               src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;
+
+       /* Set Destination address */
+       hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
+       hw_descriptor->fill_pattern_src_addr[3] =
+               upper_32_bits(dest) & 0xFFFF;
+
+       /* Set buffers size */
+       hw_descriptor->buff_size = len;
+
+       /* return the async tx descriptor */
+       return &sw_desc->async_tx;
+}
+
+/*
+ * Prepare a HW descriptor for interrupt operation.
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
+{
+       struct mv_xor_v2_sw_desc *sw_desc;
+       struct mv_xor_v2_descriptor *hw_descriptor;
+       struct mv_xor_v2_device *xor_dev =
+               container_of(chan, struct mv_xor_v2_device, dmachan);
+
+       sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+
+       /* set the HW descriptor */
+       hw_descriptor = &sw_desc->hw_desc;
+
+       /* save the SW descriptor ID to restore when operation is done */
+       hw_descriptor->desc_id = sw_desc->idx;
+
+       /* Set the INTERRUPT control word */
+       hw_descriptor->desc_ctrl =
+               DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
+       hw_descriptor->desc_ctrl |= DESC_IOD;
+
+       /* return the async tx descriptor */
+       return &sw_desc->async_tx;
+}
+
+/*
+ * push pending transactions to hardware
+ */
+static void mv_xor_v2_issue_pending(struct dma_chan *chan)
+{
+       struct mv_xor_v2_device *xor_dev =
+               container_of(chan, struct mv_xor_v2_device, dmachan);
+
+       spin_lock_bh(&xor_dev->lock);
+
+       /*
+        * update the engine with the number of descriptors to
+        * process
+        */
+       mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
+       xor_dev->npendings = 0;
+
+       /* Activate the channel */
+       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+       spin_unlock_bh(&xor_dev->lock);
+}
+
+static inline
+int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
+                                int *pending_ptr)
+{
+       u32 reg;
+
+       reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
+
+       /* get the next pending descriptor index */
+       *pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
+                       MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);
+
+       /* get the number of descriptors pending handling */
+       return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
+               MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
+}
+
+/*
+ * handle the descriptors after HW process
+ */
+static void mv_xor_v2_tasklet(unsigned long data)
+{
+       struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
+       int pending_ptr, num_of_pending, i;
+       struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
+       struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
+
+       dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
+
+       /* get the pending descriptors parameters */
+       num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
+
+       /* next HW descriptor */
+       next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
+
+       /* loop over the completed descriptors */
+       for (i = 0; i < num_of_pending; i++) {
+
+               if (pending_ptr > MV_XOR_V2_DESC_NUM)
+                       pending_ptr = 0;
+
+               if (next_pending_sw_desc != NULL)
+                       next_pending_hw_desc++;
+
+               /* get the SW descriptor related to the HW descriptor */
+               next_pending_sw_desc =
+                       &xor_dev->sw_desq[next_pending_hw_desc->desc_id];
+
+               /* call the callback */
+               if (next_pending_sw_desc->async_tx.cookie > 0) {
+                       /*
+                        * update the channel's completed cookie - no
+                        * lock is required; the IMSG threshold provides
+                        * the locking
+                        */
+                       dma_cookie_complete(&next_pending_sw_desc->async_tx);
+
+                       if (next_pending_sw_desc->async_tx.callback)
+                               next_pending_sw_desc->async_tx.callback(
+                               next_pending_sw_desc->async_tx.callback_param);
+
+                       dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
+               }
+
+               dma_run_dependencies(&next_pending_sw_desc->async_tx);
+
+               /* Lock the channel */
+               spin_lock_bh(&xor_dev->lock);
+
+               /* add the SW descriptor to the free descriptors list */
+               list_add(&next_pending_sw_desc->free_list,
+                        &xor_dev->free_sw_desc);
+
+               /* Release the channel */
+               spin_unlock_bh(&xor_dev->lock);
+
+               /* increment the next descriptor */
+               pending_ptr++;
+       }
+
+       if (num_of_pending != 0) {
+               /* free the descriptors */
+               mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
+       }
+
+       /* Update IMSG threshold, to enable new IMSG interrupts */
+       mv_xor_v2_set_imsg_thrd(xor_dev, 0);
+}
+
+/*
+ *     Set DMA Interrupt-message (IMSG) parameters
+ */
+static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+       struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);
+
+       writel(msg->address_lo,
+              xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
+       writel(msg->address_hi & 0xFFFF,
+              xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
+       writel(msg->data,
+              xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
+}
+
+static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
+{
+       u32 reg;
+
+       /* write the DESQ size to the DMA engine */
+       writel(MV_XOR_V2_DESC_NUM,
+              xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
+
+       /* write the DESQ address to the DMA engine */
+       writel(xor_dev->hw_desq & 0xFFFFFFFF,
+              xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
+       writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
+              xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
+
+       /* enable the DMA engine */
+       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+       /*
+        * This is a temporary solution, until we activate the
+        * SMMU. Set the attributes for reading & writing data buffers
+        * & descriptors to:
+        *
+        *  - OuterShareable - Snoops will be performed on CPU caches
+        *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
+        *    and Allocate
+        */
+       reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
+       reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
+       reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
+               MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
+       writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
+
+       reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
+       reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
+       reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
+               MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
+       writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
+
+       /* BW CTRL - set values to optimize the XOR performance:
+        *
+        *  - Set WrBurstLen & RdBurstLen - the unit will issue
+        *    maximum of 256B write/read transactions.
+        *  - Limit the number of outstanding write & read data
+        *    (OBB/IBB) requests to the maximal value.
+        */
+       reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
+               MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
+              (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL  <<
+               MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
+              (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
+               MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
+              (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
+               MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
+       writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);
+
+       /* Disable the AXI timer feature */
+       reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+       reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
+       writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+
+       return 0;
+}
+
+static int mv_xor_v2_probe(struct platform_device *pdev)
+{
+       struct mv_xor_v2_device *xor_dev;
+       struct resource *res;
+       int i, ret = 0;
+       struct dma_device *dma_dev;
+       struct mv_xor_v2_sw_desc *sw_desc;
+       struct msi_desc *msi_desc;
+
+       BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
+                    MV_XOR_V2_EXT_DESC_SIZE);
+
+       xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
+       if (!xor_dev)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(xor_dev->dma_base))
+               return PTR_ERR(xor_dev->dma_base);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(xor_dev->glob_base))
+               return PTR_ERR(xor_dev->glob_base);
+
+       platform_set_drvdata(pdev, xor_dev);
+
+       xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+       if (!IS_ERR(xor_dev->clk)) {
+               ret = clk_prepare_enable(xor_dev->clk);
+               if (ret)
+                       return ret;
+       }
+
+       ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
+                                            mv_xor_v2_set_msi_msg);
+       if (ret)
+               goto disable_clk;
+
+       msi_desc = first_msi_entry(&pdev->dev);
+       if (!msi_desc)
+               goto free_msi_irqs;
+
+       ret = devm_request_irq(&pdev->dev, msi_desc->irq,
+                              mv_xor_v2_interrupt_handler, 0,
+                              dev_name(&pdev->dev), xor_dev);
+       if (ret)
+               goto free_msi_irqs;
+
+       tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
+                    (unsigned long) xor_dev);
+
+       xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);
+
+       dma_cookie_init(&xor_dev->dmachan);
+
+       /*
+        * allocate coherent memory for hardware descriptors
+        * note: writecombine gives slightly better performance, but
+        * requires that we explicitly flush the writes
+        */
+       xor_dev->hw_desq_virt =
+               dma_alloc_coherent(&pdev->dev,
+                                  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
+                                  &xor_dev->hw_desq, GFP_KERNEL);
+       if (!xor_dev->hw_desq_virt) {
+               ret = -ENOMEM;
+               goto free_msi_irqs;
+       }
+
+       /* alloc memory for the SW descriptors */
+       xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) *
+                                       MV_XOR_V2_DESC_NUM, GFP_KERNEL);
+       if (!xor_dev->sw_desq) {
+               ret = -ENOMEM;
+               goto free_hw_desq;
+       }
+
+       spin_lock_init(&xor_dev->lock);
+
+       /* init the free SW descriptors list */
+       INIT_LIST_HEAD(&xor_dev->free_sw_desc);
+
+       /* add all SW descriptors to the free list */
+       for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
+               xor_dev->sw_desq[i].idx = i;
+               list_add(&xor_dev->sw_desq[i].free_list,
+                        &xor_dev->free_sw_desc);
+       }
+
+       dma_dev = &xor_dev->dmadev;
+
+       /* set DMA capabilities */
+       dma_cap_zero(dma_dev->cap_mask);
+       dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+       dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+       dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+       /* init dma link list */
+       INIT_LIST_HEAD(&dma_dev->channels);
+
+       /* set base routines */
+       dma_dev->device_tx_status = dma_cookie_status;
+       dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
+       dma_dev->dev = &pdev->dev;
+
+       dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
+       dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
+       dma_dev->max_xor = 8;
+       dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;
+
+       xor_dev->dmachan.device = dma_dev;
+
+       list_add_tail(&xor_dev->dmachan.device_node,
+                     &dma_dev->channels);
+
+       mv_xor_v2_descq_init(xor_dev);
+
+       ret = dma_async_device_register(dma_dev);
+       if (ret)
+               goto free_hw_desq;
+
+       dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");
+
+       return 0;
+
+free_hw_desq:
+       dma_free_coherent(&pdev->dev,
+                         xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
+                         xor_dev->hw_desq_virt, xor_dev->hw_desq);
+free_msi_irqs:
+       platform_msi_domain_free_irqs(&pdev->dev);
+disable_clk:
+       if (!IS_ERR(xor_dev->clk))
+               clk_disable_unprepare(xor_dev->clk);
+       return ret;
+}
+
+static int mv_xor_v2_remove(struct platform_device *pdev)
+{
+       struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);
+
+       dma_async_device_unregister(&xor_dev->dmadev);
+
+       dma_free_coherent(&pdev->dev,
+                         xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
+                         xor_dev->hw_desq_virt, xor_dev->hw_desq);
+
+       platform_msi_domain_free_irqs(&pdev->dev);
+
+       clk_disable_unprepare(xor_dev->clk);
+
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mv_xor_v2_dt_ids[] = {
+       { .compatible = "marvell,xor-v2", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
+#endif
+
+static struct platform_driver mv_xor_v2_driver = {
+       .probe          = mv_xor_v2_probe,
+       .remove         = mv_xor_v2_remove,
+       .driver         = {
+               .name   = "mv_xor_v2",
+               .of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
+       },
+};
+
+module_platform_driver(mv_xor_v2_driver);
+
+MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
+MODULE_LICENSE("GPL");
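
The packing performed by mv_xor_v2_set_data_buffers() above is compact but easy to misread. The following stand-alone user-space sketch (plain C, not driver code; lower_32_bits()/upper_32_bits() are re-implemented locally) packs two 48-bit buffer addresses into the three 32-bit words the in-code comment describes and prints the result:

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* Pack buffers 2k and 2k+1 into words[3k..3k+2], as the driver comment describes. */
static void pack_pair(uint32_t *words, int index, uint64_t src)
{
	int arr_index = (index >> 1) * 3;

	if ((index & 0x1) == 0) {
		words[arr_index] = lower_32_bits(src);
		words[arr_index + 2] &= ~0xFFFFu;
		words[arr_index + 2] |= upper_32_bits(src) & 0xFFFF;
	} else {
		words[arr_index + 1] = lower_32_bits(src);
		words[arr_index + 2] &= ~0xFFFF0000u;
		words[arr_index + 2] |= (upper_32_bits(src) & 0xFFFF) << 16;
	}
}

int main(void)
{
	uint32_t words[3] = { 0, 0, 0 };

	pack_pair(words, 0, 0x123400001000ULL);	/* buffer D0 */
	pack_pair(words, 1, 0x567800002000ULL);	/* buffer D1 */

	/* word0 = D0 low, word1 = D1 low, word2 = D1 high[47:32] | D0 high[47:32] */
	printf("%08x %08x %08x\n", (unsigned)words[0], (unsigned)words[1], (unsigned)words[2]);
	return 0;
}

For D0 = 0x1234_00001000 and D1 = 0x5678_00002000 this prints "00001000 00002000 56781234": the two low words come first and the two 16-bit high parts are merged into the third word, matching the layout documented in the driver.
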
index 2b5a198ac77e483fdf9a0e0479b200935cc4f501..08c45c18554966fb0cc1aa657ad18cd5d1be5e30 100644 (file)
@@ -227,6 +227,7 @@ struct nbpf_device {
        void __iomem *base;
        struct clk *clk;
        const struct nbpf_config *config;
+       unsigned int eirq;
        struct nbpf_channel chan[];
 };
 
@@ -1300,10 +1301,9 @@ static int nbpf_probe(struct platform_device *pdev)
 
        nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
                            sizeof(nbpf->chan[0]), GFP_KERNEL);
-       if (!nbpf) {
-               dev_err(dev, "Memory allocation failed\n");
+       if (!nbpf)
                return -ENOMEM;
-       }
+
        dma_dev = &nbpf->dma_dev;
        dma_dev->dev = dev;
 
@@ -1376,6 +1376,7 @@ static int nbpf_probe(struct platform_device *pdev)
                               IRQF_SHARED, "dma error", nbpf);
        if (ret < 0)
                return ret;
+       nbpf->eirq = eirq;
 
        INIT_LIST_HEAD(&dma_dev->channels);
 
@@ -1447,6 +1448,17 @@ e_clk_off:
 static int nbpf_remove(struct platform_device *pdev)
 {
        struct nbpf_device *nbpf = platform_get_drvdata(pdev);
+       int i;
+
+       devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);
+
+       for (i = 0; i < nbpf->config->num_channels; i++) {
+               struct nbpf_channel *chan = nbpf->chan + i;
+
+               devm_free_irq(&pdev->dev, chan->irq, chan);
+
+               tasklet_kill(&chan->tasklet);
+       }
 
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&nbpf->dma_dev);
index 1e984e18c1266b74fc01c247fad36f3e132ad587..d99ca2b511c4101ce262569106ffe1d3fa5644fb 100644 (file)
@@ -59,6 +59,8 @@ struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
+       int32_t fi;             /* for double indexing */
+       int16_t ei;             /* for double indexing */
 };
 
 struct omap_desc {
@@ -66,7 +68,8 @@ struct omap_desc {
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;
 
-       int16_t fi;             /* for OMAP_DMA_SYNC_PACKET */
+       int32_t fi;             /* for OMAP_DMA_SYNC_PACKET / double indexing */
+       int16_t ei;             /* for double indexing */
        uint8_t es;             /* CSDP_DATA_TYPE_xxx */
        uint32_t ccr;           /* CCR value */
        uint16_t clnk_ctrl;     /* CLNK_CTRL value */
@@ -379,8 +382,8 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
        }
 
        omap_dma_chan_write(c, cxsa, sg->addr);
-       omap_dma_chan_write(c, cxei, 0);
-       omap_dma_chan_write(c, cxfi, 0);
+       omap_dma_chan_write(c, cxei, sg->ei);
+       omap_dma_chan_write(c, cxfi, sg->fi);
        omap_dma_chan_write(c, CEN, sg->en);
        omap_dma_chan_write(c, CFN, sg->fn);
 
@@ -425,7 +428,7 @@ static void omap_dma_start_desc(struct omap_chan *c)
        }
 
        omap_dma_chan_write(c, cxsa, d->dev_addr);
-       omap_dma_chan_write(c, cxei, 0);
+       omap_dma_chan_write(c, cxei, d->ei);
        omap_dma_chan_write(c, cxfi, d->fi);
        omap_dma_chan_write(c, CSDP, d->csdp);
        omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
@@ -971,6 +974,89 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
 }
 
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
+       struct dma_chan *chan, struct dma_interleaved_template *xt,
+       unsigned long flags)
+{
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       struct omap_desc *d;
+       struct omap_sg *sg;
+       uint8_t data_type;
+       size_t src_icg, dst_icg;
+
+       /* Slave mode is not supported */
+       if (is_slave_direction(xt->dir))
+               return NULL;
+
+       if (xt->frame_size != 1 || xt->numf == 0)
+               return NULL;
+
+       d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+       if (!d)
+               return NULL;
+
+       data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
+       if (data_type > CSDP_DATA_TYPE_32)
+               data_type = CSDP_DATA_TYPE_32;
+
+       sg = &d->sg[0];
+       d->dir = DMA_MEM_TO_MEM;
+       d->dev_addr = xt->src_start;
+       d->es = data_type;
+       sg->en = xt->sgl[0].size / BIT(data_type);
+       sg->fn = xt->numf;
+       sg->addr = xt->dst_start;
+       d->sglen = 1;
+       d->ccr = c->ccr;
+
+       src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+       dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+       if (src_icg) {
+               d->ccr |= CCR_SRC_AMODE_DBLIDX;
+               d->ei = 1;
+               d->fi = src_icg;
+       } else if (xt->src_inc) {
+               d->ccr |= CCR_SRC_AMODE_POSTINC;
+               d->fi = 0;
+       } else {
+               dev_err(chan->device->dev,
+                       "%s: SRC constant addressing is not supported\n",
+                       __func__);
+               kfree(d);
+               return NULL;
+       }
+
+       if (dst_icg) {
+               d->ccr |= CCR_DST_AMODE_DBLIDX;
+               sg->ei = 1;
+               sg->fi = dst_icg;
+       } else if (xt->dst_inc) {
+               d->ccr |= CCR_DST_AMODE_POSTINC;
+               sg->fi = 0;
+       } else {
+               dev_err(chan->device->dev,
+                       "%s: DST constant addressing is not supported\n",
+                       __func__);
+               kfree(d);
+               return NULL;
+       }
+
+       d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+
+       d->csdp = data_type;
+
+       if (dma_omap1()) {
+               d->cicr |= CICR_TOUT_IE;
+               d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
+       } else {
+               d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
+               d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+               d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
+       }
+
+       return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
 static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
 {
        struct omap_chan *c = to_omap_dma_chan(chan);
@@ -1116,6 +1202,7 @@ static int omap_dma_probe(struct platform_device *pdev)
        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
+       dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
@@ -1123,6 +1210,7 @@ static int omap_dma_probe(struct platform_device *pdev)
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
+       od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
        od->ddev.device_config = omap_dma_slave_config;
        od->ddev.device_pause = omap_dma_pause;
        od->ddev.device_resume = omap_dma_resume;
@@ -1204,10 +1292,14 @@ static int omap_dma_probe(struct platform_device *pdev)
 static int omap_dma_remove(struct platform_device *pdev)
 {
        struct omap_dmadev *od = platform_get_drvdata(pdev);
+       int irq;
 
        if (pdev->dev.of_node)
                of_dma_controller_free(pdev->dev.of_node);
 
+       irq = platform_get_irq(pdev, 1);
+       devm_free_irq(&pdev->dev, irq, od);
+
        dma_async_device_unregister(&od->ddev);
 
        if (!od->legacy) {
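
For reference, the new omap_dma_prep_dma_interleaved() callback is reached through the generic dmaengine_prep_interleaved_dma() client API. Below is a rough client-side sketch of a single-chunk mem-to-mem request with a gap on the destination side only; submit_interleaved_copy() and its parameters are hypothetical, and channel request and completion handling are omitted:

#include <linux/dmaengine.h>
#include <linux/slab.h>

/*
 * Hypothetical helper: copy 'numf' frames of 'size' bytes from a
 * contiguous source to a destination that leaves 'dst_icg' bytes of
 * gap after every frame. 'chan' is assumed to be an already requested
 * memcpy-capable OMAP sDMA channel; src/dst are DMA addresses.
 */
static int submit_interleaved_copy(struct dma_chan *chan,
				   dma_addr_t src, dma_addr_t dst,
				   size_t size, size_t dst_icg, size_t numf)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;		/* contiguous source */
	xt->dst_inc = true;
	xt->numf = numf;
	xt->frame_size = 1;		/* the omap callback accepts one chunk only */
	xt->sgl[0].size = size;
	xt->sgl[0].dst_icg = dst_icg;	/* gap after each destination frame */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);			/* the driver copied what it needs */
	if (!tx)
		return -EINVAL;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
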
index 372b4359da97d4d7753233884e65c40abc7261cb..4fc3ffbd5ca0d0a36b489eceba61091b323228e3 100644 (file)
@@ -2828,10 +2828,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
        /* Allocate a new DMAC and its Channels */
        pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
-       if (!pl330) {
-               dev_err(&adev->dev, "unable to allocate mem\n");
+       if (!pl330)
                return -ENOMEM;
-       }
 
        pd = &pl330->ddma;
        pd->dev = &adev->dev;
@@ -2890,7 +2888,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
        if (!pl330->peripherals) {
                ret = -ENOMEM;
-               dev_err(&adev->dev, "unable to allocate pl330->peripherals\n");
                goto probe_err2;
        }
 
@@ -3005,12 +3002,18 @@ static int pl330_remove(struct amba_device *adev)
 {
        struct pl330_dmac *pl330 = amba_get_drvdata(adev);
        struct dma_pl330_chan *pch, *_p;
+       int i, irq;
 
        pm_runtime_get_noresume(pl330->ddma.dev);
 
        if (adev->dev.of_node)
                of_dma_controller_free(adev->dev.of_node);
 
+       for (i = 0; i < AMBA_NR_IRQS; i++) {
+               irq = adev->irq[i];
+               devm_free_irq(&adev->dev, irq, pl330);
+       }
+
        dma_async_device_unregister(&pl330->ddma);
 
        /* Idle the DMAC */
index 9217f893b0d1af7efec3d56bdc281beeb426fcfe..da3688b94bdc1a460c913e5c36aa4cd80838a380 100644 (file)
@@ -4084,7 +4084,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
        /* create a device */
        adev = kzalloc(sizeof(*adev), GFP_KERNEL);
        if (!adev) {
-               dev_err(&ofdev->dev, "failed to allocate device\n");
                initcode = PPC_ADMA_INIT_ALLOC;
                ret = -ENOMEM;
                goto err_adev_alloc;
@@ -4145,7 +4144,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
        /* create a channel */
        chan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan) {
-               dev_err(&ofdev->dev, "can't allocate channel structure\n");
                initcode = PPC_ADMA_INIT_CHANNEL;
                ret = -ENOMEM;
                goto err_chan_alloc;
index e756a30ccba2c9b46ea24b036a369b7aa0b07b8d..dc7850a422b8d4212495d067c4ea31c26e0af8a2 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/of.h>
+#include <linux/wait.h>
 #include <linux/dma/pxa-dma.h>
 
 #include "dmaengine.h"
@@ -118,6 +119,8 @@ struct pxad_chan {
        struct pxad_phy         *phy;
        struct dma_pool         *desc_pool;     /* Descriptors pool */
        dma_cookie_t            bus_error;
+
+       wait_queue_head_t       wq_state;
 };
 
 struct pxad_device {
@@ -318,7 +321,6 @@ static int dbg_open_##name(struct inode *inode, struct file *file) \
        return single_open(file, dbg_show_##name, inode->i_private); \
 } \
 static const struct file_operations dbg_fops_##name = { \
-       .owner          = THIS_MODULE, \
        .open           = dbg_open_##name, \
        .llseek         = seq_lseek, \
        .read           = seq_read, \
@@ -572,6 +574,7 @@ static void pxad_launch_chan(struct pxad_chan *chan,
         */
        phy_writel(chan->phy, desc->first, DDADR);
        phy_enable(chan->phy, chan->misaligned);
+       wake_up(&chan->wq_state);
 }
 
 static void set_updater_desc(struct pxad_desc_sw *sw_desc,
@@ -717,6 +720,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
                }
        }
        spin_unlock_irqrestore(&chan->vc.lock, flags);
+       wake_up(&chan->wq_state);
 
        return IRQ_HANDLED;
 }
@@ -1268,6 +1272,14 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan,
        return ret;
 }
 
+static void pxad_synchronize(struct dma_chan *dchan)
+{
+       struct pxad_chan *chan = to_pxad_chan(dchan);
+
+       wait_event(chan->wq_state, !is_chan_running(chan));
+       vchan_synchronize(&chan->vc);
+}
+
 static void pxad_free_channels(struct dma_device *dmadev)
 {
        struct pxad_chan *c, *cn;
@@ -1372,6 +1384,7 @@ static int pxad_init_dmadev(struct platform_device *op,
        pdev->slave.device_tx_status = pxad_tx_status;
        pdev->slave.device_issue_pending = pxad_issue_pending;
        pdev->slave.device_config = pxad_config;
+       pdev->slave.device_synchronize = pxad_synchronize;
        pdev->slave.device_terminate_all = pxad_terminate_all;
 
        if (op->dev.coherent_dma_mask)
@@ -1389,6 +1402,7 @@ static int pxad_init_dmadev(struct platform_device *op,
                        return -ENOMEM;
                c->vc.desc_free = pxad_free_desc;
                vchan_init(&c->vc, &pdev->slave);
+               init_waitqueue_head(&c->wq_state);
        }
 
        return dma_async_device_register(&pdev->slave);
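
The new device_synchronize hook is what makes dmaengine_terminate_sync() and dmaengine_synchronize() work for clients of this driver. A short sketch of the client-side teardown it enables; shutdown_channel() is a hypothetical helper, not part of the driver:

#include <linux/dmaengine.h>

/*
 * Hypothetical client-side teardown enabled by device_synchronize:
 * after dmaengine_terminate_sync() returns, no descriptor callback
 * for 'chan' can still be running, so per-transfer client state may
 * be freed and the channel released safely.
 */
static void shutdown_channel(struct dma_chan *chan)
{
	/* terminate_async + synchronize in one call */
	dmaengine_terminate_sync(chan);

	/*
	 * Equivalent two-step form:
	 *   dmaengine_terminate_async(chan);
	 *   ...
	 *   dmaengine_synchronize(chan);
	 */

	dma_release_channel(chan);
}
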
index 969b48176745031f4b2d9bf952561b4101af00e7..03c4eb3fd314d31df1cc8ce39122dc37b85ce8ea 100644 (file)
@@ -48,6 +48,7 @@
 #include <linux/of_dma.h>
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
 
 #include "../dmaengine.h"
 #include "../virt-dma.h"
@@ -58,6 +59,8 @@ struct bam_desc_hw {
        __le16 flags;
 };
 
+#define BAM_DMA_AUTOSUSPEND_DELAY 100
+
 #define DESC_FLAG_INT BIT(15)
 #define DESC_FLAG_EOT BIT(14)
 #define DESC_FLAG_EOB BIT(13)
@@ -527,12 +530,17 @@ static void bam_free_chan(struct dma_chan *chan)
        struct bam_device *bdev = bchan->bdev;
        u32 val;
        unsigned long flags;
+       int ret;
+
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return;
 
        vchan_free_chan_resources(to_virt_chan(chan));
 
        if (bchan->curr_txd) {
                dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
-               return;
+               goto err;
        }
 
        spin_lock_irqsave(&bchan->vc.lock, flags);
@@ -550,6 +558,10 @@ static void bam_free_chan(struct dma_chan *chan)
 
        /* disable irq */
        writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
+
+err:
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
 }
 
 /**
@@ -696,11 +708,18 @@ static int bam_pause(struct dma_chan *chan)
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;
        unsigned long flag;
+       int ret;
+
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&bchan->vc.lock, flag);
        writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
        bchan->paused = 1;
        spin_unlock_irqrestore(&bchan->vc.lock, flag);
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
 
        return 0;
 }
@@ -715,11 +734,18 @@ static int bam_resume(struct dma_chan *chan)
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;
        unsigned long flag;
+       int ret;
+
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&bchan->vc.lock, flag);
        writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
        bchan->paused = 0;
        spin_unlock_irqrestore(&bchan->vc.lock, flag);
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
 
        return 0;
 }
@@ -795,6 +821,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 {
        struct bam_device *bdev = data;
        u32 clr_mask = 0, srcs = 0;
+       int ret;
 
        srcs |= process_channel_irqs(bdev);
 
@@ -802,6 +829,10 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
        if (srcs & P_IRQ)
                tasklet_schedule(&bdev->task);
 
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return ret;
+
        if (srcs & BAM_IRQ) {
                clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
 
@@ -814,6 +845,9 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
                writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
        }
 
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
+
        return IRQ_HANDLED;
 }
 
@@ -893,6 +927,7 @@ static void bam_start_dma(struct bam_chan *bchan)
        struct bam_desc_hw *desc;
        struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
                                        sizeof(struct bam_desc_hw));
+       int ret;
 
        lockdep_assert_held(&bchan->vc.lock);
 
@@ -904,6 +939,10 @@ static void bam_start_dma(struct bam_chan *bchan)
        async_desc = container_of(vd, struct bam_async_desc, vd);
        bchan->curr_txd = async_desc;
 
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return;
+
        /* on first use, initialize the channel hardware */
        if (!bchan->initialized)
                bam_chan_init_hw(bchan, async_desc->dir);
@@ -946,6 +985,9 @@ static void bam_start_dma(struct bam_chan *bchan)
        wmb();
        writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
                        bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
+
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
 }
 
 /**
@@ -970,6 +1012,7 @@ static void dma_tasklet(unsigned long data)
                        bam_start_dma(bchan);
                spin_unlock_irqrestore(&bchan->vc.lock, flags);
        }
+
 }
 
 /**
@@ -1213,6 +1256,13 @@ static int bam_dma_probe(struct platform_device *pdev)
        if (ret)
                goto err_unregister_dma;
 
+       pm_runtime_irq_safe(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
        return 0;
 
 err_unregister_dma:
@@ -1233,6 +1283,8 @@ static int bam_dma_remove(struct platform_device *pdev)
        struct bam_device *bdev = platform_get_drvdata(pdev);
        u32 i;
 
+       pm_runtime_force_suspend(&pdev->dev);
+
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&bdev->common);
 
@@ -1260,11 +1312,66 @@ static int bam_dma_remove(struct platform_device *pdev)
        return 0;
 }
 
+static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
+{
+       struct bam_device *bdev = dev_get_drvdata(dev);
+
+       clk_disable(bdev->bamclk);
+
+       return 0;
+}
+
+static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
+{
+       struct bam_device *bdev = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_enable(bdev->bamclk);
+       if (ret < 0) {
+               dev_err(dev, "clk_enable failed: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused bam_dma_suspend(struct device *dev)
+{
+       struct bam_device *bdev = dev_get_drvdata(dev);
+
+       pm_runtime_force_suspend(dev);
+
+       clk_unprepare(bdev->bamclk);
+
+       return 0;
+}
+
+static int __maybe_unused bam_dma_resume(struct device *dev)
+{
+       struct bam_device *bdev = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_prepare(bdev->bamclk);
+       if (ret)
+               return ret;
+
+       pm_runtime_force_resume(dev);
+
+       return 0;
+}
+
+static const struct dev_pm_ops bam_dma_pm_ops = {
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
+       SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
+                               NULL)
+};
+
 static struct platform_driver bam_dma_driver = {
        .probe = bam_dma_probe,
        .remove = bam_dma_remove,
        .driver = {
                .name = "bam-dma-engine",
+               .pm = &bam_dma_pm_ops,
                .of_match_table = bam_of_match,
        },
 };
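
The runtime-PM calls added throughout bam_dma above all wrap register access in the same bracket. A generic sketch of that pattern; with_runtime_pm() and do_hw_work() are placeholders, not part of the driver:

#include <linux/pm_runtime.h>

/*
 * Sketch of the runtime-PM bracket used around hardware access: take
 * a usage reference (resuming the clock if the device was
 * autosuspended), touch the hardware, then drop the reference and
 * re-arm the autosuspend timer.
 */
static int with_runtime_pm(struct device *dev, void (*do_hw_work)(struct device *dev))
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	do_hw_work(dev);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}
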
index 41b5c6dee713239623b550dbbe39189f6110ba1c..b2374cd91e457f38b946548cd3bcda7aed47e82e 100644 (file)
@@ -708,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev)
        pm_runtime_get_sync(dmadev->ddev.dev);
        dma_async_device_unregister(&dmadev->ddev);
        devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+       tasklet_kill(&dmadev->task);
        hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
        hidma_free(dmadev);
index f3929001539b0a464c30ff650a7934b22c588f52..ad20dfb64c71cb32d3b392c0fcb0dcd0002a5787 100644 (file)
@@ -831,6 +831,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
 
        required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
        tasklet_kill(&lldev->task);
+       tasklet_kill(&lldev->rst_task);
        memset(lldev->trepool, 0, required_bytes);
        lldev->trepool = NULL;
        lldev->pending_tre_count = 0;
index c0e365321310009a6df4443b173a9f7823438a4d..82f36e4660830b33262bfc3d9a2acc5ef198a336 100644 (file)
@@ -371,8 +371,8 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
                pdevinfo.size_data = 0;
                pdevinfo.dma_mask = DMA_BIT_MASK(64);
                new_pdev = platform_device_register_full(&pdevinfo);
-               if (!new_pdev) {
-                       ret = -ENODEV;
+               if (IS_ERR(new_pdev)) {
+                       ret = PTR_ERR(new_pdev);
                        goto out;
                }
                of_dma_configure(&new_pdev->dev, child);
@@ -392,8 +392,7 @@ static int __init hidma_mgmt_init(void)
 #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        struct device_node *child;
 
-       for (child = of_find_matching_node(NULL, hidma_mgmt_match); child;
-            child = of_find_matching_node(child, hidma_mgmt_match)) {
+       for_each_matching_node(child, hidma_mgmt_match) {
                /* device tree based firmware here */
                hidma_mgmt_of_populate_channels(child);
                of_node_put(child);
index 17ccdfd28f3702d7de7b0b98299262a36690df85..ce67075589f52813570bddc9a92bcdce4b8a4e60 100644 (file)
@@ -768,16 +768,12 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
 
        spin_lock_irqsave(&s3cchan->vc.lock, flags);
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_COMPLETE) {
-               spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-               return ret;
-       }
 
        /*
         * There's no point calculating the residue if there's
         * no txstate to store the value.
         */
-       if (!txstate) {
+       if (ret == DMA_COMPLETE || !txstate) {
                spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
                return ret;
        }
@@ -1105,11 +1101,8 @@ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
         */
        for (i = 0; i < channels; i++) {
                chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
-               if (!chan) {
-                       dev_err(dmadev->dev,
-                               "%s no memory for channel\n", __func__);
+               if (!chan)
                        return -ENOMEM;
-               }
 
                chan->id = i;
                chan->host = s3cdma;
@@ -1143,8 +1136,10 @@ static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
        struct s3c24xx_dma_chan *next;
 
        list_for_each_entry_safe(chan,
-                                next, &dmadev->channels, vc.chan.device_node)
+                                next, &dmadev->channels, vc.chan.device_node) {
                list_del(&chan->vc.chan.device_node);
+               tasklet_kill(&chan->vc.task);
+       }
 }
 
 /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
@@ -1366,6 +1361,18 @@ err_memcpy:
        return ret;
 }
 
+static void s3c24xx_dma_free_irq(struct platform_device *pdev,
+                               struct s3c24xx_dma_engine *s3cdma)
+{
+       int i;
+
+       for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
+               struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+
+               devm_free_irq(&pdev->dev, phy->irq, phy);
+       }
+}
+
 static int s3c24xx_dma_remove(struct platform_device *pdev)
 {
        const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
@@ -1376,6 +1383,8 @@ static int s3c24xx_dma_remove(struct platform_device *pdev)
        dma_async_device_unregister(&s3cdma->slave);
        dma_async_device_unregister(&s3cdma->memcpy);
 
+       s3c24xx_dma_free_irq(pdev, s3cdma);
+
        s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
        s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
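
The s3c24xx changes above make remove() drop the physical-channel IRQs before the virtual channels are torn down, and kill each vchan tasklet while walking the channel list. A sketch of that remove-path ordering, reduced to a single channel and using hypothetical names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct example_chan {
        int irq;
        struct tasklet_struct task;     /* scheduled from the IRQ handler */
};

static int example_remove(struct platform_device *pdev)
{
        struct example_chan *chan = platform_get_drvdata(pdev);

        /* 1. Release the IRQ first so nothing can schedule the tasklet. */
        devm_free_irq(&pdev->dev, chan->irq, chan);

        /* 2. Then wait for any already-scheduled tasklet run to finish. */
        tasklet_kill(&chan->task);

        return 0;
}
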
 
index dfb17926297bd4f095a77b8105725335fa7c1aff..0dd953884d1d63eb90e3b1599aedcb3f02ac0caa 100644 (file)
@@ -311,7 +311,7 @@ static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
 {
        u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
 
-       return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
+       return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
 }
 
 static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
@@ -510,7 +510,7 @@ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
 
        spin_lock_irqsave(&chan->lock, flags);
        list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
-       list_add_tail(&desc->node, &chan->desc.free);
+       list_add(&desc->node, &chan->desc.free);
        spin_unlock_irqrestore(&chan->lock, flags);
 }
 
@@ -990,6 +990,8 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
        list_splice_init(&rchan->desc.done, &list);
        list_splice_init(&rchan->desc.wait, &list);
 
+       rchan->desc.running = NULL;
+
        list_for_each_entry(desc, &list, node)
                rcar_dmac_realloc_hwdesc(rchan, desc, 0);
 
@@ -1143,19 +1145,46 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        struct rcar_dmac_desc *desc = chan->desc.running;
        struct rcar_dmac_xfer_chunk *running = NULL;
        struct rcar_dmac_xfer_chunk *chunk;
+       enum dma_status status;
        unsigned int residue = 0;
        unsigned int dptr = 0;
 
        if (!desc)
                return 0;
 
+       /*
+        * If the cookie corresponds to a descriptor that has been completed
+        * there is no residue. The same check has already been performed by the
+        * caller but without holding the channel lock, so the descriptor could
+        * now be complete.
+        */
+       status = dma_cookie_status(&chan->chan, cookie, NULL);
+       if (status == DMA_COMPLETE)
+               return 0;
+
        /*
         * If the cookie doesn't correspond to the currently running transfer
         * then the descriptor hasn't been processed yet, and the residue is
         * equal to the full descriptor size.
         */
-       if (cookie != desc->async_tx.cookie)
-               return desc->size;
+       if (cookie != desc->async_tx.cookie) {
+               list_for_each_entry(desc, &chan->desc.pending, node) {
+                       if (cookie == desc->async_tx.cookie)
+                               return desc->size;
+               }
+               list_for_each_entry(desc, &chan->desc.active, node) {
+                       if (cookie == desc->async_tx.cookie)
+                               return desc->size;
+               }
+
+               /*
+                * No descriptor found for the cookie, there's thus no residue.
+                * This shouldn't happen if the calling driver passes a correct
+                * cookie value.
+                */
+               WARN(1, "No descriptor for cookie!");
+               return 0;
+       }
 
        /*
         * In descriptor mode the descriptor running pointer is not maintained
@@ -1202,6 +1231,10 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
        residue = rcar_dmac_chan_get_residue(rchan, cookie);
        spin_unlock_irqrestore(&rchan->lock, flags);
 
+       /* if there's no residue, the cookie is complete */
+       if (!residue)
+               return DMA_COMPLETE;
+
        dma_set_residue(txstate, residue);
 
        return status;
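
The rcar-dmac hunks above move the residue calculation fully under the channel lock, re-check the cookie there (it may have completed since the unlocked check), and report DMA_COMPLETE when no residue is left. A sketch of that tx_status shape, with a cached residue field standing in for the hardware register walk and all names hypothetical:

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

#include "dmaengine.h"          /* dma_cookie_status(), as for in-tree drivers/dma code */

struct example_chan {
        struct dma_chan chan;
        spinlock_t lock;
        unsigned int residue;   /* hypothetical: updated by the ISR/tasklet */
};

static enum dma_status example_tx_status(struct dma_chan *c, dma_cookie_t cookie,
                                         struct dma_tx_state *txstate)
{
        struct example_chan *echan = container_of(c, struct example_chan, chan);
        enum dma_status status;
        unsigned int residue;
        unsigned long flags;

        status = dma_cookie_status(c, cookie, txstate);
        if (status == DMA_COMPLETE || !txstate)
                return status;

        /* Read the residue under the channel lock so it matches the cookie. */
        spin_lock_irqsave(&echan->lock, flags);
        residue = echan->residue;
        spin_unlock_irqrestore(&echan->lock, flags);

        if (!residue)
                return DMA_COMPLETE;    /* nothing left to transfer */

        dma_set_residue(txstate, residue);
        return status;
}
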
index 80d86402490eef39b9873ad3b5a31a39f790973b..c94ffab0d25c756609b1d0c01733a4abc322a8a1 100644 (file)
@@ -532,11 +532,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 
        sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
                               GFP_KERNEL);
-       if (!sh_chan) {
-               dev_err(sdev->dma_dev.dev,
-                       "No free memory for allocating dma channels!\n");
+       if (!sh_chan)
                return -ENOMEM;
-       }
 
        schan = &sh_chan->shdma_chan;
        schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
@@ -732,10 +729,8 @@ static int sh_dmae_probe(struct platform_device *pdev)
 
        shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
                             GFP_KERNEL);
-       if (!shdev) {
-               dev_err(&pdev->dev, "Not enough memory\n");
+       if (!shdev)
                return -ENOMEM;
-       }
 
        dma_dev = &shdev->shdma_dev.dma_dev;
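
The shdmac hunks above, like several others in this series (s3c24xx, sudmac, sirf, ste_dma40, tegra, timb_dma), drop the driver-level error message on allocation failure: the allocator already logs a detailed warning, so returning -ENOMEM is enough. A minimal probe sketch of the resulting shape, with hypothetical names:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_dev {
        struct device *dev;
};

static int example_probe(struct platform_device *pdev)
{
        struct example_dev *edev;

        edev = devm_kzalloc(&pdev->dev, sizeof(*edev), GFP_KERNEL);
        if (!edev)
                return -ENOMEM; /* no dev_err(): the allocator already warned */

        edev->dev = &pdev->dev;
        platform_set_drvdata(pdev, edev);
        return 0;
}
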
 
index 6da2eaa6c294e6018d481f8833ac7c3a379f607e..69b9564dc9d9cb2e68e2c3f9226075e40efd2b8d 100644 (file)
@@ -245,11 +245,8 @@ static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
        int err;
 
        sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
-       if (!sc) {
-               dev_err(sdev->dma_dev.dev,
-                       "No free memory for allocating dma channels!\n");
+       if (!sc)
                return -ENOMEM;
-       }
 
        schan = &sc->shdma_chan;
        schan->max_xfer_len = 64 * 1024 * 1024 - 1;
@@ -349,10 +346,8 @@ static int sudmac_probe(struct platform_device *pdev)
        err = -ENOMEM;
        su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
                              GFP_KERNEL);
-       if (!su_dev) {
-               dev_err(&pdev->dev, "Not enough memory\n");
+       if (!su_dev)
                return err;
-       }
 
        dma_dev = &su_dev->shdma_dev.dma_dev;
 
index e48350e650899de4e6e6a90c275d1dd49c6ea582..d8bc3f2a71db1ad7dee743cd92999f970eb97832 100644 (file)
@@ -854,10 +854,9 @@ static int sirfsoc_dma_probe(struct platform_device *op)
        int ret, i;
 
        sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
-       if (!sdma) {
-               dev_err(dev, "Memory exhausted!\n");
+       if (!sdma)
                return -ENOMEM;
-       }
+
        data = (struct sirfsoc_dmadata *)
                (of_match_device(op->dev.driver->of_match_table,
                                 &op->dev)->data);
@@ -981,6 +980,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
        of_dma_controller_free(op->dev.of_node);
        dma_async_device_unregister(&sdma->dma);
        free_irq(sdma->irq, sdma);
+       tasklet_kill(&sdma->tasklet);
        irq_dispose_mapping(sdma->irq);
        pm_runtime_disable(&op->dev);
        if (!pm_runtime_status_suspended(&op->dev))
@@ -1126,17 +1126,17 @@ static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
 };
 
-struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
+static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
        .exec = sirfsoc_dma_execute_hw_a6,
        .type = SIRFSOC_DMA_VER_A6,
 };
 
-struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
+static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
        .exec = sirfsoc_dma_execute_hw_a7v1,
        .type = SIRFSOC_DMA_VER_A7V1,
 };
 
-struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
+static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
        .exec = sirfsoc_dma_execute_hw_a7v2,
        .type = SIRFSOC_DMA_VER_A7V2,
 };
index 6fb8307468ab60b847dab3c21f2899cd99876c69..8b18e44a02d51438809216a6d1b16e002d60fbde 100644 (file)
@@ -2588,7 +2588,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
        }
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_COMPLETE)
+       if (ret != DMA_COMPLETE && txstate)
                dma_set_residue(txstate, stedma40_residue(chan));
 
        if (d40_is_paused(d40c))
@@ -3237,10 +3237,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
                       (num_phy_chans + num_log_chans + num_memcpy_chans) *
                       sizeof(struct d40_chan), GFP_KERNEL);
 
-       if (base == NULL) {
-               d40_err(&pdev->dev, "Out of memory\n");
+       if (base == NULL)
                goto failure;
-       }
 
        base->rev = rev;
        base->clk = clk;
index 27b818dee7c7f85b5aa730d4db81b9fa4f231782..13b42dd9900c5136bcc6ee6048e2d7086f108bdc 100644 (file)
@@ -10,7 +10,7 @@
 
 #include "ste_dma40_ll.h"
 
-u8 d40_width_to_bits(enum dma_slave_buswidth width)
+static u8 d40_width_to_bits(enum dma_slave_buswidth width)
 {
        if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
                return STEDMA40_ESIZE_8_BIT;
index 5065ca43facede040f92a2ac64fa35c147675fe3..3835fcde35456fb20e89e77d5b02644b98df4df8 100644 (file)
@@ -865,7 +865,7 @@ static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
        size_t bytes = 0;
 
        ret = dma_cookie_status(chan, cookie, state);
-       if (ret == DMA_COMPLETE)
+       if (ret == DMA_COMPLETE || !state)
                return ret;
 
        spin_lock_irqsave(&vchan->vc.lock, flags);
index 01e316f73559ceb2c45dc487f1972b9c0f0a680e..6ab9eb98588a88514f64f4b1221d87da3ecc4721 100644 (file)
@@ -300,10 +300,8 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
 
        /* Allocate DMA desc */
        dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
-       if (!dma_desc) {
-               dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
+       if (!dma_desc)
                return NULL;
-       }
 
        dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
        dma_desc->txd.tx_submit = tegra_dma_tx_submit;
@@ -340,8 +338,7 @@ static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
        spin_unlock_irqrestore(&tdc->lock, flags);
 
        sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);
-       if (!sg_req)
-               dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
+
        return sg_req;
 }
 
@@ -484,7 +481,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
         * load new configuration.
         */
        tegra_dma_pause(tdc, false);
-       status  = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+       status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 
        /*
         * If interrupt is pending then do nothing as the ISR will handle
@@ -822,13 +819,8 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
        /* Check on wait_ack desc status */
        list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
                if (dma_desc->txd.cookie == cookie) {
-                       residual =  dma_desc->bytes_requested -
-                                       (dma_desc->bytes_transferred %
-                                               dma_desc->bytes_requested);
-                       dma_set_residue(txstate, residual);
                        ret = dma_desc->dma_status;
-                       spin_unlock_irqrestore(&tdc->lock, flags);
-                       return ret;
+                       goto found;
                }
        }
 
@@ -836,17 +828,22 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
        list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
                dma_desc = sg_req->dma_desc;
                if (dma_desc->txd.cookie == cookie) {
-                       residual =  dma_desc->bytes_requested -
-                                       (dma_desc->bytes_transferred %
-                                               dma_desc->bytes_requested);
-                       dma_set_residue(txstate, residual);
                        ret = dma_desc->dma_status;
-                       spin_unlock_irqrestore(&tdc->lock, flags);
-                       return ret;
+                       goto found;
                }
        }
 
-       dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie);
+       dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
+       dma_desc = NULL;
+
+found:
+       if (dma_desc && txstate) {
+               residual = dma_desc->bytes_requested -
+                          (dma_desc->bytes_transferred %
+                           dma_desc->bytes_requested);
+               dma_set_residue(txstate, residual);
+       }
+
        spin_unlock_irqrestore(&tdc->lock, flags);
        return ret;
 }
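
The tegra_dma_tx_status() rework above looks the cookie up in both lists, then computes and reports the residue once, at a single label, and only when the caller supplied a dma_tx_state. A rough sketch of that shape, with hypothetical list and field names:

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_desc {
        struct list_head node;
        dma_cookie_t cookie;
        enum dma_status status;
        unsigned int bytes_requested;
        unsigned int bytes_transferred;
};

struct example_chan {
        spinlock_t lock;
        struct list_head free_list;
        struct list_head pending_list;
};

static enum dma_status example_desc_status(struct example_chan *echan,
                                           dma_cookie_t cookie,
                                           struct dma_tx_state *txstate)
{
        struct example_desc *found = NULL, *desc;
        enum dma_status ret = DMA_IN_PROGRESS;
        unsigned long flags;

        spin_lock_irqsave(&echan->lock, flags);

        list_for_each_entry(desc, &echan->free_list, node) {
                if (desc->cookie == cookie) {
                        found = desc;
                        goto out;
                }
        }

        list_for_each_entry(desc, &echan->pending_list, node) {
                if (desc->cookie == cookie) {
                        found = desc;
                        goto out;
                }
        }

out:
        if (found) {
                ret = found->status;
                /* one residue computation, and only if there is a txstate */
                if (txstate)
                        dma_set_residue(txstate, found->bytes_requested -
                                        (found->bytes_transferred %
                                         found->bytes_requested));
        }

        spin_unlock_irqrestore(&echan->lock, flags);
        return ret;
}
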
@@ -905,7 +902,6 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
        unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
        enum dma_slave_buswidth *slave_bw)
 {
-
        switch (direction) {
        case DMA_MEM_TO_DEV:
                *apb_addr = tdc->dma_sconfig.dst_addr;
@@ -948,8 +944,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 {
        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
        struct tegra_dma_desc *dma_desc;
-       unsigned int        i;
-       struct scatterlist      *sg;
+       unsigned int i;
+       struct scatterlist *sg;
        unsigned long csr, ahb_seq, apb_ptr, apb_seq;
        struct list_head req_list;
        struct tegra_dma_sg_req  *sg_req = NULL;
@@ -1062,7 +1058,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
 {
        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
        struct tegra_dma_desc *dma_desc = NULL;
-       struct tegra_dma_sg_req  *sg_req = NULL;
+       struct tegra_dma_sg_req *sg_req = NULL;
        unsigned long csr, ahb_seq, apb_ptr, apb_seq;
        int len;
        size_t remain_len;
@@ -1204,7 +1200,6 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
 {
        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
        struct tegra_dma *tdma = tdc->tdma;
-
        struct tegra_dma_desc *dma_desc;
        struct tegra_dma_sg_req *sg_req;
        struct list_head dma_desc_list;
@@ -1305,7 +1300,7 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
 
 static int tegra_dma_probe(struct platform_device *pdev)
 {
-       struct resource *res;
+       struct resource *res;
        struct tegra_dma *tdma;
        int ret;
        int i;
@@ -1319,10 +1314,8 @@ static int tegra_dma_probe(struct platform_device *pdev)
 
        tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
                        sizeof(struct tegra_dma_channel), GFP_KERNEL);
-       if (!tdma) {
-               dev_err(&pdev->dev, "Error: memory allocation failed\n");
+       if (!tdma)
                return -ENOMEM;
-       }
 
        tdma->dev = &pdev->dev;
        tdma->chip_data = cdata;
index e107779b1a2e81392e135c45a7513052bbaa0545..5ae294b256a7e45e6480b74c3c1cccc9cb480ba8 100644 (file)
@@ -452,7 +452,7 @@ static struct platform_driver ti_dma_xbar_driver = {
        .probe  = ti_dma_xbar_probe,
 };
 
-int omap_dmaxbar_init(void)
+static int omap_dmaxbar_init(void)
 {
        return platform_driver_register(&ti_dma_xbar_driver);
 }
index 559cd4073698ce806bf448274af6458bdb89ff83..e82745aa42a8bfc5deaecfb62f39d1b2a2972278 100644 (file)
@@ -337,18 +337,14 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
        int err;
 
        td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
-       if (!td_desc) {
-               dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+       if (!td_desc)
                goto out;
-       }
 
        td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
 
        td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
-       if (!td_desc->desc_list) {
-               dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+       if (!td_desc->desc_list)
                goto err;
-       }
 
        dma_async_tx_descriptor_init(&td_desc->txd, chan);
        td_desc->txd.tx_submit = td_tx_submit;
index 8849318b32b7ab5b48f14ea8304027f8a0da7fd9..7632290e7c1438ff331a79c2750041366c352366 100644 (file)
@@ -1165,9 +1165,12 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
 {
        struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
 
+
        dma_async_device_unregister(&dc->dma);
-       if (dc->irq >= 0)
+       if (dc->irq >= 0) {
+               devm_free_irq(&pdev->dev, dc->irq, dc);
                tasklet_kill(&dc->tasklet);
+       }
        dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
        return 0;
 }
@@ -1228,8 +1231,10 @@ static int txx9dmac_remove(struct platform_device *pdev)
        struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
 
        txx9dmac_off(ddev);
-       if (ddev->irq >= 0)
+       if (ddev->irq >= 0) {
+               devm_free_irq(&pdev->dev, ddev->irq, ddev);
                tasklet_kill(&ddev->tasklet);
+       }
        return 0;
 }
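
The txx9dmac hunks above free a devm-requested IRQ by hand because devres would otherwise only release it after remove() returns, i.e. after tasklet_kill(); freeing it first guarantees no interrupt can re-schedule the tasklet. A sketch of the request side this pairs with, using hypothetical names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct example_dc {
        int irq;                        /* may be negative when polled */
        struct tasklet_struct tasklet;
};

static void example_tasklet_fn(unsigned long data)
{
        /* descriptor completion handling would go here */
}

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        struct example_dc *dc = dev_id;

        tasklet_schedule(&dc->tasklet);
        return IRQ_HANDLED;
}

static int example_request_irq(struct platform_device *pdev, struct example_dc *dc)
{
        tasklet_init(&dc->tasklet, example_tasklet_fn, (unsigned long)dc);

        /*
         * A devm-requested IRQ is released only after remove() returns, so
         * the remove path must call devm_free_irq() itself before
         * tasklet_kill(), as the hunks above do.
         */
        return devm_request_irq(&pdev->dev, dc->irq, example_interrupt, 0,
                                "example-dma", dc);
}
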
 
index 3c4e9f2fea2871dce7b300a345ac20531ecdf3a4..9e91f8f5b087a8cf5baa57a73305669225fb5f08 100644 (file)
@@ -1 +1,2 @@
-obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o
+obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
+obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
new file mode 100644 (file)
index 0000000..4e223d0
--- /dev/null
@@ -0,0 +1,2689 @@
+/*
+ * DMA driver for Xilinx Video DMA Engine
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * Based on the Freescale DMA driver.
+ *
+ * Description:
+ * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
+ * core that provides high-bandwidth direct memory access between memory
+ * and AXI4-Stream type video target peripherals. The core provides efficient
+ * two dimensional DMA operations with independent asynchronous read (S2MM)
+ * and write (MM2S) channel operation. It can be configured to have either
+ * one channel or two channels. If configured as two channels, one is to
+ * transmit to the video device (MM2S) and another is to receive from the
+ * video device (S2MM). Initialization, status, interrupt and management
+ * registers are accessed through an AXI4-Lite slave interface.
+ *
+ * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
+ * provides high-bandwidth one dimensional direct memory access between memory
+ * and AXI4-Stream target peripherals. It supports one receive and one
+ * transmit channel, both of them optional at synthesis time.
+ *
+ * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
+ * Access (DMA) between a memory-mapped source address and a memory-mapped
+ * destination address.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "../dmaengine.h"
+
+/* Register/Descriptor Offsets */
+#define XILINX_DMA_MM2S_CTRL_OFFSET            0x0000
+#define XILINX_DMA_S2MM_CTRL_OFFSET            0x0030
+#define XILINX_VDMA_MM2S_DESC_OFFSET           0x0050
+#define XILINX_VDMA_S2MM_DESC_OFFSET           0x00a0
+
+/* Control Registers */
+#define XILINX_DMA_REG_DMACR                   0x0000
+#define XILINX_DMA_DMACR_DELAY_MAX             0xff
+#define XILINX_DMA_DMACR_DELAY_SHIFT           24
+#define XILINX_DMA_DMACR_FRAME_COUNT_MAX       0xff
+#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT     16
+#define XILINX_DMA_DMACR_ERR_IRQ               BIT(14)
+#define XILINX_DMA_DMACR_DLY_CNT_IRQ           BIT(13)
+#define XILINX_DMA_DMACR_FRM_CNT_IRQ           BIT(12)
+#define XILINX_DMA_DMACR_MASTER_SHIFT          8
+#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT        5
+#define XILINX_DMA_DMACR_FRAMECNT_EN           BIT(4)
+#define XILINX_DMA_DMACR_GENLOCK_EN            BIT(3)
+#define XILINX_DMA_DMACR_RESET                 BIT(2)
+#define XILINX_DMA_DMACR_CIRC_EN               BIT(1)
+#define XILINX_DMA_DMACR_RUNSTOP               BIT(0)
+#define XILINX_DMA_DMACR_FSYNCSRC_MASK         GENMASK(6, 5)
+
+#define XILINX_DMA_REG_DMASR                   0x0004
+#define XILINX_DMA_DMASR_EOL_LATE_ERR          BIT(15)
+#define XILINX_DMA_DMASR_ERR_IRQ               BIT(14)
+#define XILINX_DMA_DMASR_DLY_CNT_IRQ           BIT(13)
+#define XILINX_DMA_DMASR_FRM_CNT_IRQ           BIT(12)
+#define XILINX_DMA_DMASR_SOF_LATE_ERR          BIT(11)
+#define XILINX_DMA_DMASR_SG_DEC_ERR            BIT(10)
+#define XILINX_DMA_DMASR_SG_SLV_ERR            BIT(9)
+#define XILINX_DMA_DMASR_EOF_EARLY_ERR         BIT(8)
+#define XILINX_DMA_DMASR_SOF_EARLY_ERR         BIT(7)
+#define XILINX_DMA_DMASR_DMA_DEC_ERR           BIT(6)
+#define XILINX_DMA_DMASR_DMA_SLAVE_ERR         BIT(5)
+#define XILINX_DMA_DMASR_DMA_INT_ERR           BIT(4)
+#define XILINX_DMA_DMASR_IDLE                  BIT(1)
+#define XILINX_DMA_DMASR_HALTED                BIT(0)
+#define XILINX_DMA_DMASR_DELAY_MASK            GENMASK(31, 24)
+#define XILINX_DMA_DMASR_FRAME_COUNT_MASK      GENMASK(23, 16)
+
+#define XILINX_DMA_REG_CURDESC                 0x0008
+#define XILINX_DMA_REG_TAILDESC                0x0010
+#define XILINX_DMA_REG_REG_INDEX               0x0014
+#define XILINX_DMA_REG_FRMSTORE                0x0018
+#define XILINX_DMA_REG_THRESHOLD               0x001c
+#define XILINX_DMA_REG_FRMPTR_STS              0x0024
+#define XILINX_DMA_REG_PARK_PTR                0x0028
+#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT       8
+#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT       0
+#define XILINX_DMA_REG_VDMA_VERSION            0x002c
+
+/* Register Direct Mode Registers */
+#define XILINX_DMA_REG_VSIZE                   0x0000
+#define XILINX_DMA_REG_HSIZE                   0x0004
+
+#define XILINX_DMA_REG_FRMDLY_STRIDE           0x0008
+#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT  24
+#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT  0
+
+#define XILINX_VDMA_REG_START_ADDRESS(n)       (0x000c + 4 * (n))
+#define XILINX_VDMA_REG_START_ADDRESS_64(n)    (0x000c + 8 * (n))
+
+/* HW specific definitions */
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE        0x20
+
+#define XILINX_DMA_DMAXR_ALL_IRQ_MASK  \
+               (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
+                XILINX_DMA_DMASR_DLY_CNT_IRQ | \
+                XILINX_DMA_DMASR_ERR_IRQ)
+
+#define XILINX_DMA_DMASR_ALL_ERR_MASK  \
+               (XILINX_DMA_DMASR_EOL_LATE_ERR | \
+                XILINX_DMA_DMASR_SOF_LATE_ERR | \
+                XILINX_DMA_DMASR_SG_DEC_ERR | \
+                XILINX_DMA_DMASR_SG_SLV_ERR | \
+                XILINX_DMA_DMASR_EOF_EARLY_ERR | \
+                XILINX_DMA_DMASR_SOF_EARLY_ERR | \
+                XILINX_DMA_DMASR_DMA_DEC_ERR | \
+                XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
+                XILINX_DMA_DMASR_DMA_INT_ERR)
+
+/*
+ * Recoverable errors are DMA Internal error, SOF Early, EOF Early
+ * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
+ * is enabled in the h/w system.
+ */
+#define XILINX_DMA_DMASR_ERR_RECOVER_MASK      \
+               (XILINX_DMA_DMASR_SOF_LATE_ERR | \
+                XILINX_DMA_DMASR_EOF_EARLY_ERR | \
+                XILINX_DMA_DMASR_SOF_EARLY_ERR | \
+                XILINX_DMA_DMASR_DMA_INT_ERR)
+
+/* Axi VDMA Flush on Fsync bits */
+#define XILINX_DMA_FLUSH_S2MM          3
+#define XILINX_DMA_FLUSH_MM2S          2
+#define XILINX_DMA_FLUSH_BOTH          1
+
+/* Delay loop counter to prevent hardware failure */
+#define XILINX_DMA_LOOP_COUNT          1000000
+
+/* AXI DMA Specific Registers/Offsets */
+#define XILINX_DMA_REG_SRCDSTADDR      0x18
+#define XILINX_DMA_REG_BTT             0x28
+
+/* AXI DMA Specific Masks/Bit fields */
+#define XILINX_DMA_MAX_TRANS_LEN       GENMASK(22, 0)
+#define XILINX_DMA_CR_COALESCE_MAX     GENMASK(23, 16)
+#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK        BIT(4)
+#define XILINX_DMA_CR_COALESCE_SHIFT   16
+#define XILINX_DMA_BD_SOP              BIT(27)
+#define XILINX_DMA_BD_EOP              BIT(26)
+#define XILINX_DMA_COALESCE_MAX                255
+#define XILINX_DMA_NUM_APP_WORDS       5
+
+/* Multi-Channel DMA Descriptor offsets */
+#define XILINX_DMA_MCRX_CDESC(x)       (0x40 + (x-1) * 0x20)
+#define XILINX_DMA_MCRX_TDESC(x)       (0x48 + (x-1) * 0x20)
+
+/* Multi-Channel DMA Masks/Shifts */
+#define XILINX_DMA_BD_HSIZE_MASK       GENMASK(15, 0)
+#define XILINX_DMA_BD_STRIDE_MASK      GENMASK(15, 0)
+#define XILINX_DMA_BD_VSIZE_MASK       GENMASK(31, 19)
+#define XILINX_DMA_BD_TDEST_MASK       GENMASK(4, 0)
+#define XILINX_DMA_BD_STRIDE_SHIFT     0
+#define XILINX_DMA_BD_VSIZE_SHIFT      19
+
+/* AXI CDMA Specific Registers/Offsets */
+#define XILINX_CDMA_REG_SRCADDR                0x18
+#define XILINX_CDMA_REG_DSTADDR                0x20
+
+/* AXI CDMA Specific Masks */
+#define XILINX_CDMA_CR_SGMODE          BIT(3)
+
+/**
+ * struct xilinx_vdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @vsize: Vertical Size @0x10
+ * @hsize: Horizontal Size @0x14
+ * @stride: Number of bytes between the first
+ *         pixels of each horizontal line @0x18
+ */
+struct xilinx_vdma_desc_hw {
+       u32 next_desc;
+       u32 pad1;
+       u32 buf_addr;
+       u32 buf_addr_msb;
+       u32 vsize;
+       u32 hsize;
+       u32 stride;
+} __aligned(64);
+
+/**
+ * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @mcdma_control: Control field for mcdma @0x10
+ * @vsize_stride: Vertical Size and Stride field for mcdma @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ * @app: APP Fields @0x20 - 0x30
+ */
+struct xilinx_axidma_desc_hw {
+       u32 next_desc;
+       u32 next_desc_msb;
+       u32 buf_addr;
+       u32 buf_addr_msb;
+       u32 mcdma_control;
+       u32 vsize_stride;
+       u32 control;
+       u32 status;
+       u32 app[XILINX_DMA_NUM_APP_WORDS];
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_descmsb: Next Descriptor Pointer MSB @0x04
+ * @src_addr: Source address @0x08
+ * @src_addrmsb: Source address MSB @0x0C
+ * @dest_addr: Destination address @0x10
+ * @dest_addrmsb: Destination address MSB @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ */
+struct xilinx_cdma_desc_hw {
+       u32 next_desc;
+       u32 next_desc_msb;
+       u32 src_addr;
+       u32 src_addr_msb;
+       u32 dest_addr;
+       u32 dest_addr_msb;
+       u32 control;
+       u32 status;
+} __aligned(64);
+
+/**
+ * struct xilinx_vdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_vdma_tx_segment {
+       struct xilinx_vdma_desc_hw hw;
+       struct list_head node;
+       dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_axidma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_axidma_tx_segment {
+       struct xilinx_axidma_desc_hw hw;
+       struct list_head node;
+       dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_cdma_tx_segment {
+       struct xilinx_cdma_desc_hw hw;
+       struct list_head node;
+       dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_dma_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @segments: TX segments list
+ * @node: Node in the channel descriptors list
+ * @cyclic: Check for cyclic transfers.
+ */
+struct xilinx_dma_tx_descriptor {
+       struct dma_async_tx_descriptor async_tx;
+       struct list_head segments;
+       struct list_head node;
+       bool cyclic;
+};
+
+/**
+ * struct xilinx_dma_chan - Driver specific DMA channel structure
+ * @xdev: Driver specific device structure
+ * @ctrl_offset: Control registers offset
+ * @desc_offset: TX descriptor registers offset
+ * @lock: Descriptor operation lock
+ * @pending_list: Descriptors waiting
+ * @active_list: Descriptors ready to submit
+ * @done_list: Complete descriptors
+ * @common: DMA common channel
+ * @desc_pool: Descriptors pool
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @id: Channel ID
+ * @direction: Transfer direction
+ * @num_frms: Number of frames
+ * @has_sg: Support scatter transfers
+ * @cyclic: Check for cyclic transfers.
+ * @genlock: Support genlock mode
+ * @err: Channel has errors
+ * @tasklet: Cleanup work after irq
+ * @config: Device configuration info
+ * @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
+ * @ext_addr: Indicates 64 bit addressing is supported by dma channel
+ * @desc_submitcount: Descriptor h/w submitted count
+ * @residue: Residue for AXI DMA
+ * @seg_v: Statically allocated segments base
+ * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
+ * @start_transfer: Transfer start routine, differs per DMA IP (VDMA/CDMA/AXI DMA)
+ */
+struct xilinx_dma_chan {
+       struct xilinx_dma_device *xdev;
+       u32 ctrl_offset;
+       u32 desc_offset;
+       spinlock_t lock;
+       struct list_head pending_list;
+       struct list_head active_list;
+       struct list_head done_list;
+       struct dma_chan common;
+       struct dma_pool *desc_pool;
+       struct device *dev;
+       int irq;
+       int id;
+       enum dma_transfer_direction direction;
+       int num_frms;
+       bool has_sg;
+       bool cyclic;
+       bool genlock;
+       bool err;
+       struct tasklet_struct tasklet;
+       struct xilinx_vdma_config config;
+       bool flush_on_fsync;
+       u32 desc_pendingcount;
+       bool ext_addr;
+       u32 desc_submitcount;
+       u32 residue;
+       struct xilinx_axidma_tx_segment *seg_v;
+       struct xilinx_axidma_tx_segment *cyclic_seg_v;
+       void (*start_transfer)(struct xilinx_dma_chan *chan);
+       u16 tdest;
+};
+
+struct xilinx_dma_config {
+       enum xdma_ip_type dmatype;
+       int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
+                       struct clk **tx_clk, struct clk **txs_clk,
+                       struct clk **rx_clk, struct clk **rxs_clk);
+};
+
+/**
+ * struct xilinx_dma_device - DMA device structure
+ * @regs: I/O mapped base address
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific DMA channel
+ * @has_sg: Specifies whether Scatter-Gather is present or not
+ * @mcdma: Specifies whether Multi-Channel is present or not
+ * @flush_on_fsync: Flush on frame sync
+ * @ext_addr: Indicates 64 bit addressing is supported by dma device
+ * @pdev: Platform device structure pointer
+ * @dma_config: DMA config structure
+ * @axi_clk: DMA Axi4-lite interface clock
+ * @tx_clk: DMA mm2s clock
+ * @txs_clk: DMA mm2s stream clock
+ * @rx_clk: DMA s2mm clock
+ * @rxs_clk: DMA s2mm stream clock
+ * @nr_channels: Number of channels DMA device supports
+ * @chan_id: DMA channel identifier
+ */
+struct xilinx_dma_device {
+       void __iomem *regs;
+       struct device *dev;
+       struct dma_device common;
+       struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
+       bool has_sg;
+       bool mcdma;
+       u32 flush_on_fsync;
+       bool ext_addr;
+       struct platform_device  *pdev;
+       const struct xilinx_dma_config *dma_config;
+       struct clk *axi_clk;
+       struct clk *tx_clk;
+       struct clk *txs_clk;
+       struct clk *rx_clk;
+       struct clk *rxs_clk;
+       u32 nr_channels;
+       u32 chan_id;
+};
+
+/* Macros */
+#define to_xilinx_chan(chan) \
+       container_of(chan, struct xilinx_dma_chan, common)
+#define to_dma_tx_descriptor(tx) \
+       container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
+#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+       readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+                          cond, delay_us, timeout_us)
+
+/* IO accessors */
+static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
+{
+       return ioread32(chan->xdev->regs + reg);
+}
+
+static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
+{
+       iowrite32(value, chan->xdev->regs + reg);
+}
+
+static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
+                                  u32 value)
+{
+       dma_write(chan, chan->desc_offset + reg, value);
+}
+
+static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
+{
+       return dma_read(chan, chan->ctrl_offset + reg);
+}
+
+static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
+                                  u32 value)
+{
+       dma_write(chan, chan->ctrl_offset + reg, value);
+}
+
+static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
+                                u32 clr)
+{
+       dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
+}
+
+static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
+                                u32 set)
+{
+       dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
+}
+
+/**
+ * vdma_desc_write_64 - 64-bit descriptor write
+ * @chan: Driver specific VDMA channel
+ * @reg: Register to write
+ * @value_lsb: lower address of the descriptor.
+ * @value_msb: upper address of the descriptor.
+ *
+ * Since the vdma driver writes to register offsets that are not 64-bit
+ * aligned (e.g. 0x5c), the descriptor address is written as two separate
+ * 32-bit writes instead of a single 64-bit register write.
+ */
+static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
+                                     u32 value_lsb, u32 value_msb)
+{
+       /* Write the lsb 32 bits */
+       writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
+
+       /* Write the msb 32 bits */
+       writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
+}
+
+static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
+{
+       lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
+}
+
+static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
+                               dma_addr_t addr)
+{
+       if (chan->ext_addr)
+               dma_writeq(chan, reg, addr);
+       else
+               dma_ctrl_write(chan, reg, addr);
+}
+
+static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
+                                    struct xilinx_axidma_desc_hw *hw,
+                                    dma_addr_t buf_addr, size_t sg_used,
+                                    size_t period_len)
+{
+       if (chan->ext_addr) {
+               hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
+               hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
+                                                period_len);
+       } else {
+               hw->buf_addr = buf_addr + sg_used + period_len;
+       }
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors and segments alloc and free
+ */
+
+/**
+ * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_vdma_tx_segment *
+xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_vdma_tx_segment *segment;
+       dma_addr_t phys;
+
+       segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+       if (!segment)
+               return NULL;
+
+       segment->phys = phys;
+
+       return segment;
+}
+
+/**
+ * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_cdma_tx_segment *
+xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_cdma_tx_segment *segment;
+       dma_addr_t phys;
+
+       segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+       if (!segment)
+               return NULL;
+
+       segment->phys = phys;
+
+       return segment;
+}
+
+/**
+ * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_axidma_tx_segment *
+xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_axidma_tx_segment *segment;
+       dma_addr_t phys;
+
+       segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+       if (!segment)
+               return NULL;
+
+       segment->phys = phys;
+
+       return segment;
+}
+
+/**
+ * xilinx_dma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
+                               struct xilinx_axidma_tx_segment *segment)
+{
+       dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_cdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
+                               struct xilinx_cdma_tx_segment *segment)
+{
+       dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_vdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
+                                       struct xilinx_vdma_tx_segment *segment)
+{
+       dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated descriptor on success and NULL on failure.
+ */
+static struct xilinx_dma_tx_descriptor *
+xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_dma_tx_descriptor *desc;
+
+       desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+       if (!desc)
+               return NULL;
+
+       INIT_LIST_HEAD(&desc->segments);
+
+       return desc;
+}
+
+/**
+ * xilinx_dma_free_tx_descriptor - Free transaction descriptor
+ * @chan: Driver specific DMA channel
+ * @desc: DMA transaction descriptor
+ */
+static void
+xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
+                              struct xilinx_dma_tx_descriptor *desc)
+{
+       struct xilinx_vdma_tx_segment *segment, *next;
+       struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
+       struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
+
+       if (!desc)
+               return;
+
+       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+               list_for_each_entry_safe(segment, next, &desc->segments, node) {
+                       list_del(&segment->node);
+                       xilinx_vdma_free_tx_segment(chan, segment);
+               }
+       } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+               list_for_each_entry_safe(cdma_segment, cdma_next,
+                                        &desc->segments, node) {
+                       list_del(&cdma_segment->node);
+                       xilinx_cdma_free_tx_segment(chan, cdma_segment);
+               }
+       } else {
+               list_for_each_entry_safe(axidma_segment, axidma_next,
+                                        &desc->segments, node) {
+                       list_del(&axidma_segment->node);
+                       xilinx_dma_free_tx_segment(chan, axidma_segment);
+               }
+       }
+
+       kfree(desc);
+}
+
+/* Required functions */
+
+/**
+ * xilinx_dma_free_desc_list - Free descriptors list
+ * @chan: Driver specific DMA channel
+ * @list: List to parse and delete the descriptor
+ */
+static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
+                                       struct list_head *list)
+{
+       struct xilinx_dma_tx_descriptor *desc, *next;
+
+       list_for_each_entry_safe(desc, next, list, node) {
+               list_del(&desc->node);
+               xilinx_dma_free_tx_descriptor(chan, desc);
+       }
+}
+
+/**
+ * xilinx_dma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+
+       xilinx_dma_free_desc_list(chan, &chan->pending_list);
+       xilinx_dma_free_desc_list(chan, &chan->done_list);
+       xilinx_dma_free_desc_list(chan, &chan->active_list);
+
+       spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ */
+static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+
+       dev_dbg(chan->dev, "Free all channel resources.\n");
+
+       xilinx_dma_free_descriptors(chan);
+       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+               xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
+               xilinx_dma_free_tx_segment(chan, chan->seg_v);
+       }
+       dma_pool_destroy(chan->desc_pool);
+       chan->desc_pool = NULL;
+}
+
+/**
+ * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ * @flags: flags for spin lock
+ */
+static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
+                                         struct xilinx_dma_tx_descriptor *desc,
+                                         unsigned long *flags)
+{
+       dma_async_tx_callback callback;
+       void *callback_param;
+
+       callback = desc->async_tx.callback;
+       callback_param = desc->async_tx.callback_param;
+       if (callback) {
+               spin_unlock_irqrestore(&chan->lock, *flags);
+               callback(callback_param);
+               spin_lock_irqsave(&chan->lock, *flags);
+       }
+}
+
+/**
+ * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_dma_tx_descriptor *desc, *next;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+
+       list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+               dma_async_tx_callback callback;
+               void *callback_param;
+
+               if (desc->cyclic) {
+                       xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
+                       break;
+               }
+
+               /* Remove from the list of running transactions */
+               list_del(&desc->node);
+
+               /* Run the link descriptor callback function */
+               callback = desc->async_tx.callback;
+               callback_param = desc->async_tx.callback_param;
+               if (callback) {
+                       spin_unlock_irqrestore(&chan->lock, flags);
+                       callback(callback_param);
+                       spin_lock_irqsave(&chan->lock, flags);
+               }
+
+               /* Run any dependencies, then free the descriptor */
+               dma_run_dependencies(&desc->async_tx);
+               xilinx_dma_free_tx_descriptor(chan, desc);
+       }
+
+       spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx DMA channel structure
+ */
+static void xilinx_dma_do_tasklet(unsigned long data)
+{
+       struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
+
+       xilinx_dma_chan_desc_cleanup(chan);
+}
+
+/**
+ * xilinx_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+
+       /* Has this channel already been allocated? */
+       if (chan->desc_pool)
+               return 0;
+
+       /*
+        * The descriptor must be aligned to 64 bytes to meet the
+        * Xilinx VDMA specification requirement.
+        */
+       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+               chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
+                                  chan->dev,
+                                  sizeof(struct xilinx_axidma_tx_segment),
+                                  __alignof__(struct xilinx_axidma_tx_segment),
+                                  0);
+       } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+               chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
+                                  chan->dev,
+                                  sizeof(struct xilinx_cdma_tx_segment),
+                                  __alignof__(struct xilinx_cdma_tx_segment),
+                                  0);
+       } else {
+               chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
+                                    chan->dev,
+                                    sizeof(struct xilinx_vdma_tx_segment),
+                                    __alignof__(struct xilinx_vdma_tx_segment),
+                                    0);
+       }
+
+       if (!chan->desc_pool) {
+               dev_err(chan->dev,
+                       "unable to allocate channel %d descriptor pool\n",
+                       chan->id);
+               return -ENOMEM;
+       }
+
+       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+               /*
+                * For AXI DMA case after submitting a pending_list, keep
+                * an extra segment allocated so that the "next descriptor"
+                * pointer on the tail descriptor always points to a
+                * valid descriptor, even when paused after reaching taildesc.
+                * This way, it is possible to issue additional
+                * transfers without halting and restarting the channel.
+                */
+               chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
+
+               /*
+                * For cyclic DMA mode the tail descriptor register must be
+                * programmed with a value that is not part of the BD chain,
+                * so allocate a descriptor segment at channel allocation
+                * time for programming the tail descriptor.
+                */
+               chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+       }
+
+       dma_cookie_init(dchan);
+
+       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+               /* For AXI DMA, resetting one channel will reset the
+                * other channel as well, so enable the interrupts here.
+                */
+               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+                             XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+       }
+
+       if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+                            XILINX_CDMA_CR_SGMODE);
+
+       return 0;
+}
+
+/**
+ * xilinx_dma_tx_status - Get DMA transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: DMA transaction status
+ */
+static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
+                                       dma_cookie_t cookie,
+                                       struct dma_tx_state *txstate)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       struct xilinx_dma_tx_descriptor *desc;
+       struct xilinx_axidma_tx_segment *segment;
+       struct xilinx_axidma_desc_hw *hw;
+       enum dma_status ret;
+       unsigned long flags;
+       u32 residue = 0;
+
+       ret = dma_cookie_status(dchan, cookie, txstate);
+       if (ret == DMA_COMPLETE || !txstate)
+               return ret;
+
+       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+               spin_lock_irqsave(&chan->lock, flags);
+
+               desc = list_last_entry(&chan->active_list,
+                                      struct xilinx_dma_tx_descriptor, node);
+               if (chan->has_sg) {
+                       list_for_each_entry(segment, &desc->segments, node) {
+                               hw = &segment->hw;
+                               residue += (hw->control - hw->status) &
+                                          XILINX_DMA_MAX_TRANS_LEN;
+                       }
+               }
+               spin_unlock_irqrestore(&chan->lock, flags);
+
+               chan->residue = residue;
+               dma_set_residue(txstate, chan->residue);
+       }
+
+       return ret;
+}
+
+/**
+ * xilinx_dma_is_running - Check if DMA channel is running
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '1' if running, '0' if not.
+ */
+static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
+{
+       return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+                XILINX_DMA_DMASR_HALTED) &&
+               (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
+                XILINX_DMA_DMACR_RUNSTOP);
+}
+
+/**
+ * xilinx_dma_is_idle - Check if DMA channel is idle
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '1' if idle, '0' if not.
+ */
+static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
+{
+       return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+               XILINX_DMA_DMASR_IDLE;
+}
+
+/**
+ * xilinx_dma_halt - Halt DMA channel
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
+{
+       int err;
+       u32 val;
+
+       dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
+
+       /* Wait for the hardware to halt */
+       err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+                                     (val & XILINX_DMA_DMASR_HALTED), 0,
+                                     XILINX_DMA_LOOP_COUNT);
+
+       if (err) {
+               dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+                       chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+               chan->err = true;
+       }
+}
+
+/**
+ * xilinx_dma_start - Start DMA channel
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_start(struct xilinx_dma_chan *chan)
+{
+       int err;
+       u32 val;
+
+       dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
+
+       /* Wait for the hardware to start */
+       err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+                                     !(val & XILINX_DMA_DMASR_HALTED), 0,
+                                     XILINX_DMA_LOOP_COUNT);
+
+       if (err) {
+               dev_err(chan->dev, "Cannot start channel %p: %x\n",
+                       chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+
+               chan->err = true;
+       }
+}
+
+/**
+ * xilinx_vdma_start_transfer - Starts VDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_vdma_config *config = &chan->config;
+       struct xilinx_dma_tx_descriptor *desc, *tail_desc;
+       u32 reg;
+       struct xilinx_vdma_tx_segment *tail_segment;
+
+       /* This function was invoked with lock held */
+       if (chan->err)
+               return;
+
+       if (list_empty(&chan->pending_list))
+               return;
+
+       desc = list_first_entry(&chan->pending_list,
+                               struct xilinx_dma_tx_descriptor, node);
+       tail_desc = list_last_entry(&chan->pending_list,
+                                   struct xilinx_dma_tx_descriptor, node);
+
+       tail_segment = list_last_entry(&tail_desc->segments,
+                                      struct xilinx_vdma_tx_segment, node);
+
+       /* If it is SG mode and hardware is busy, cannot submit */
+       if (chan->has_sg && xilinx_dma_is_running(chan) &&
+           !xilinx_dma_is_idle(chan)) {
+               dev_dbg(chan->dev, "DMA controller still busy\n");
+               return;
+       }
+
+       /*
+        * If hardware is idle, then all descriptors on the running lists are
+        * done, start new transfers
+        */
+       if (chan->has_sg)
+               dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+                               desc->async_tx.phys);
+
+       /* Configure the hardware using info in the config structure */
+       reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+       if (config->frm_cnt_en)
+               reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
+       else
+               reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
+
+       /* Configure channel to allow this number of frame buffers */
+       dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
+                       chan->desc_pendingcount);
+
+       /*
+        * With SG, start with circular mode, so that BDs can be fetched.
+        * In direct register mode, if not parking, enable circular mode
+        */
+       if (chan->has_sg || !config->park)
+               reg |= XILINX_DMA_DMACR_CIRC_EN;
+
+       if (config->park)
+               reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+
+       if (config->park && (config->park_frm >= 0) &&
+                       (config->park_frm < chan->num_frms)) {
+               if (chan->direction == DMA_MEM_TO_DEV)
+                       dma_write(chan, XILINX_DMA_REG_PARK_PTR,
+                               config->park_frm <<
+                                       XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
+               else
+                       dma_write(chan, XILINX_DMA_REG_PARK_PTR,
+                               config->park_frm <<
+                                       XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
+       }
+
+       /* Start the hardware */
+       xilinx_dma_start(chan);
+
+       if (chan->err)
+               return;
+
+       /* Start the transfer */
+       if (chan->has_sg) {
+               dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+                               tail_segment->phys);
+       } else {
+               struct xilinx_vdma_tx_segment *segment, *last = NULL;
+               int i = 0;
+
+               if (chan->desc_submitcount < chan->num_frms)
+                       i = chan->desc_submitcount;
+
+               list_for_each_entry(segment, &desc->segments, node) {
+                       if (chan->ext_addr)
+                               vdma_desc_write_64(chan,
+                                       XILINX_VDMA_REG_START_ADDRESS_64(i++),
+                                       segment->hw.buf_addr,
+                                       segment->hw.buf_addr_msb);
+                       else
+                               vdma_desc_write(chan,
+                                       XILINX_VDMA_REG_START_ADDRESS(i++),
+                                       segment->hw.buf_addr);
+
+                       last = segment;
+               }
+
+               if (!last)
+                       return;
+
+               /* HW expects these parameters to be the same for one transaction */
+               vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+               vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+                               last->hw.stride);
+               vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+       }
+
+       if (!chan->has_sg) {
+               list_del(&desc->node);
+               list_add_tail(&desc->node, &chan->active_list);
+               chan->desc_submitcount++;
+               chan->desc_pendingcount--;
+               if (chan->desc_submitcount == chan->num_frms)
+                       chan->desc_submitcount = 0;
+       } else {
+               list_splice_tail_init(&chan->pending_list, &chan->active_list);
+               chan->desc_pendingcount = 0;
+       }
+}
+
+/**
+ * xilinx_cdma_start_transfer - Starts cdma transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+       struct xilinx_cdma_tx_segment *tail_segment;
+       u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
+
+       if (chan->err)
+               return;
+
+       if (list_empty(&chan->pending_list))
+               return;
+
+       head_desc = list_first_entry(&chan->pending_list,
+                                    struct xilinx_dma_tx_descriptor, node);
+       tail_desc = list_last_entry(&chan->pending_list,
+                                   struct xilinx_dma_tx_descriptor, node);
+       tail_segment = list_last_entry(&tail_desc->segments,
+                                      struct xilinx_cdma_tx_segment, node);
+
+       if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+               ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+               ctrl_reg |= chan->desc_pendingcount <<
+                               XILINX_DMA_CR_COALESCE_SHIFT;
+               dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
+       }
+
+       if (chan->has_sg) {
+               xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+                            head_desc->async_tx.phys);
+
+               /* Update tail ptr register which will start the transfer */
+               xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+                            tail_segment->phys);
+       } else {
+               /* In simple mode */
+               struct xilinx_cdma_tx_segment *segment;
+               struct xilinx_cdma_desc_hw *hw;
+
+               segment = list_first_entry(&head_desc->segments,
+                                          struct xilinx_cdma_tx_segment,
+                                          node);
+
+               hw = &segment->hw;
+
+               xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
+               xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
+
+               /* Start the transfer */
+               dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+                               hw->control & XILINX_DMA_MAX_TRANS_LEN);
+       }
+
+       list_splice_tail_init(&chan->pending_list, &chan->active_list);
+       chan->desc_pendingcount = 0;
+}
+
+/**
+ * xilinx_dma_start_transfer - Starts DMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+       struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
+       u32 reg;
+
+       if (chan->err)
+               return;
+
+       if (list_empty(&chan->pending_list))
+               return;
+
+       /* If it is SG mode and hardware is busy, cannot submit */
+       if (chan->has_sg && xilinx_dma_is_running(chan) &&
+           !xilinx_dma_is_idle(chan)) {
+               dev_dbg(chan->dev, "DMA controller still busy\n");
+               return;
+       }
+
+       head_desc = list_first_entry(&chan->pending_list,
+                                    struct xilinx_dma_tx_descriptor, node);
+       tail_desc = list_last_entry(&chan->pending_list,
+                                   struct xilinx_dma_tx_descriptor, node);
+       tail_segment = list_last_entry(&tail_desc->segments,
+                                      struct xilinx_axidma_tx_segment, node);
+
+       if (chan->has_sg && !chan->xdev->mcdma) {
+               old_head = list_first_entry(&head_desc->segments,
+                                       struct xilinx_axidma_tx_segment, node);
+               new_head = chan->seg_v;
+               /* Copy Buffer Descriptor fields. */
+               new_head->hw = old_head->hw;
+
+               /* Swap in the reserved segment and keep the old head as the new reserve */
+               list_replace_init(&old_head->node, &new_head->node);
+               chan->seg_v = old_head;
+
+               tail_segment->hw.next_desc = chan->seg_v->phys;
+               head_desc->async_tx.phys = new_head->phys;
+       }
+
+       reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+       if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+               reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+               reg |= chan->desc_pendingcount <<
+                                 XILINX_DMA_CR_COALESCE_SHIFT;
+               dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       }
+
+       if (chan->has_sg && !chan->xdev->mcdma)
+               xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+                            head_desc->async_tx.phys);
+
+       if (chan->has_sg && chan->xdev->mcdma) {
+               if (chan->direction == DMA_MEM_TO_DEV) {
+                       dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+                                      head_desc->async_tx.phys);
+               } else {
+                       if (!chan->tdest) {
+                               dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+                                      head_desc->async_tx.phys);
+                       } else {
+                               dma_ctrl_write(chan,
+                                       XILINX_DMA_MCRX_CDESC(chan->tdest),
+                                      head_desc->async_tx.phys);
+                       }
+               }
+       }
+
+       xilinx_dma_start(chan);
+
+       if (chan->err)
+               return;
+
+       /* Start the transfer */
+       if (chan->has_sg && !chan->xdev->mcdma) {
+               if (chan->cyclic)
+                       xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+                                    chan->cyclic_seg_v->phys);
+               else
+                       xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+                                    tail_segment->phys);
+       } else if (chan->has_sg && chan->xdev->mcdma) {
+               if (chan->direction == DMA_MEM_TO_DEV) {
+                       dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+                              tail_segment->phys);
+               } else {
+                       if (!chan->tdest) {
+                               dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+                                              tail_segment->phys);
+                       } else {
+                               dma_ctrl_write(chan,
+                                       XILINX_DMA_MCRX_TDESC(chan->tdest),
+                                       tail_segment->phys);
+                       }
+               }
+       } else {
+               struct xilinx_axidma_tx_segment *segment;
+               struct xilinx_axidma_desc_hw *hw;
+
+               segment = list_first_entry(&head_desc->segments,
+                                          struct xilinx_axidma_tx_segment,
+                                          node);
+               hw = &segment->hw;
+
+               xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+
+               /* Start the transfer */
+               dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+                              hw->control & XILINX_DMA_MAX_TRANS_LEN);
+       }
+
+       list_splice_tail_init(&chan->pending_list, &chan->active_list);
+       chan->desc_pendingcount = 0;
+}
+
+/**
+ * xilinx_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ */
+static void xilinx_dma_issue_pending(struct dma_chan *dchan)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       chan->start_transfer(chan);
+       spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
+ * @chan : xilinx DMA channel
+ *
+ * CONTEXT: hardirq
+ */
+static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_dma_tx_descriptor *desc, *next;
+
+       /* This function was invoked with lock held */
+       if (list_empty(&chan->active_list))
+               return;
+
+       list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+               list_del(&desc->node);
+               if (!desc->cyclic)
+                       dma_cookie_complete(&desc->async_tx);
+               list_add_tail(&desc->node, &chan->done_list);
+       }
+}
+
+/**
+ * xilinx_dma_reset - Reset DMA channel
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
+{
+       int err;
+       u32 tmp;
+
+       dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
+
+       /* Wait for the hardware to finish reset */
+       err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
+                                     !(tmp & XILINX_DMA_DMACR_RESET), 0,
+                                     XILINX_DMA_LOOP_COUNT);
+
+       if (err) {
+               dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
+                       dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
+                       dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+               return -ETIMEDOUT;
+       }
+
+       chan->err = false;
+
+       return err;
+}
+
+/**
+ * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
+{
+       int err;
+
+       /* Reset VDMA */
+       err = xilinx_dma_reset(chan);
+       if (err)
+               return err;
+
+       /* Enable interrupts */
+       dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+                     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+       return 0;
+}
+
+/**
+ * xilinx_dma_irq_handler - DMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx DMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
+{
+       struct xilinx_dma_chan *chan = data;
+       u32 status;
+
+       /* Read the status and ack the interrupts. */
+       status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
+       if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
+               return IRQ_NONE;
+
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+                       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+       if (status & XILINX_DMA_DMASR_ERR_IRQ) {
+               /*
+                * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
+                * error is recoverable, ignore it. Otherwise flag the error.
+                *
+                * Only recoverable errors can be cleared in the DMASR register;
+                * make sure not to write 1 to any of the other error bits.
+                */
+               u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
+
+               dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+                               errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
+
+               if (!chan->flush_on_fsync ||
+                   (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
+                       dev_err(chan->dev,
+                               "Channel %p has errors %x, cdr %x tdr %x\n",
+                               chan, errors,
+                               dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
+                               dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
+                       chan->err = true;
+               }
+       }
+
+       if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
+               /*
+                * The inter-packet delay timer expired: the device is taking
+                * longer than the configured delay threshold between packets.
+                */
+               dev_dbg(chan->dev, "Inter-packet latency too long\n");
+       }
+
+       if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
+               spin_lock(&chan->lock);
+               xilinx_dma_complete_descriptor(chan);
+               chan->start_transfer(chan);
+               spin_unlock(&chan->lock);
+       }
+
+       tasklet_schedule(&chan->tasklet);
+       return IRQ_HANDLED;
+}
+
+/**
+ * append_desc_queue - Append a descriptor to the channel's pending queue
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_dma_chan *chan,
+                             struct xilinx_dma_tx_descriptor *desc)
+{
+       struct xilinx_vdma_tx_segment *tail_segment;
+       struct xilinx_dma_tx_descriptor *tail_desc;
+       struct xilinx_axidma_tx_segment *axidma_tail_segment;
+       struct xilinx_cdma_tx_segment *cdma_tail_segment;
+
+       if (list_empty(&chan->pending_list))
+               goto append;
+
+       /*
+        * Add the hardware descriptor to the chain of hardware descriptors
+        * that already exists in memory.
+        */
+       tail_desc = list_last_entry(&chan->pending_list,
+                                   struct xilinx_dma_tx_descriptor, node);
+       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+               tail_segment = list_last_entry(&tail_desc->segments,
+                                              struct xilinx_vdma_tx_segment,
+                                              node);
+               tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+       } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+               cdma_tail_segment = list_last_entry(&tail_desc->segments,
+                                               struct xilinx_cdma_tx_segment,
+                                               node);
+               cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+       } else {
+               axidma_tail_segment = list_last_entry(&tail_desc->segments,
+                                              struct xilinx_axidma_tx_segment,
+                                              node);
+               axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+       }
+
+       /*
+        * Add the software descriptor and all children to the list
+        * of pending transactions
+        */
+append:
+       list_add_tail(&desc->node, &chan->pending_list);
+       chan->desc_pendingcount++;
+
+       if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
+           && unlikely(chan->desc_pendingcount > chan->num_frms)) {
+               dev_dbg(chan->dev, "desc pendingcount is too high\n");
+               chan->desc_pendingcount = chan->num_frms;
+       }
+}
+
+/**
+ * xilinx_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+       struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
+       dma_cookie_t cookie;
+       unsigned long flags;
+       int err;
+
+       if (chan->cyclic) {
+               xilinx_dma_free_tx_descriptor(chan, desc);
+               return -EBUSY;
+       }
+
+       if (chan->err) {
+               /*
+                * The channel is in an error state: try to reset it.
+                * If the reset fails, the channel is no longer functional
+                * and only a hard system reset can recover it.
+                */
+               err = xilinx_dma_chan_reset(chan);
+               if (err < 0)
+                       return err;
+       }
+
+       spin_lock_irqsave(&chan->lock, flags);
+
+       cookie = dma_cookie_assign(tx);
+
+       /* Put this transaction onto the tail of the pending queue */
+       append_desc_queue(chan, desc);
+
+       if (desc->cyclic)
+               chan->cyclic = true;
+
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       return cookie;
+}
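
xilinx_dma_tx_submit() only appends the descriptor to the channel's pending queue; nothing reaches the hardware until the client calls dma_async_issue_pending(), which lands in xilinx_dma_issue_pending() above. A minimal client-side sketch of that flow, with a hypothetical completion callback that is not part of this patch:

/* Illustrative client-side flow only; not part of this patch. */
#include <linux/completion.h>
#include <linux/dmaengine.h>

static void my_xfer_done(void *param)		/* hypothetical callback */
{
	complete(param);
}

static int my_submit_and_issue(struct dma_chan *chan,
			       struct dma_async_tx_descriptor *txd,
			       struct completion *done)
{
	dma_cookie_t cookie;

	txd->callback = my_xfer_done;
	txd->callback_param = done;

	/* Runs the driver's tx_submit hook and queues onto pending_list */
	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EINVAL;

	/* Kicks chan->start_transfer() under the channel lock */
	dma_async_issue_pending(chan);
	return 0;
}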
+
+/**
+ * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
+ *     DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
+                                struct dma_interleaved_template *xt,
+                                unsigned long flags)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       struct xilinx_dma_tx_descriptor *desc;
+       struct xilinx_vdma_tx_segment *segment, *prev = NULL;
+       struct xilinx_vdma_desc_hw *hw;
+
+       if (!is_slave_direction(xt->dir))
+               return NULL;
+
+       if (!xt->numf || !xt->sgl[0].size)
+               return NULL;
+
+       if (xt->frame_size != 1)
+               return NULL;
+
+       /* Allocate a transaction descriptor. */
+       desc = xilinx_dma_alloc_tx_descriptor(chan);
+       if (!desc)
+               return NULL;
+
+       dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+       desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+       async_tx_ack(&desc->async_tx);
+
+       /* Allocate the link descriptor from DMA pool */
+       segment = xilinx_vdma_alloc_tx_segment(chan);
+       if (!segment)
+               goto error;
+
+       /* Fill in the hardware descriptor */
+       hw = &segment->hw;
+       hw->vsize = xt->numf;
+       hw->hsize = xt->sgl[0].size;
+       hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
+                       XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
+       hw->stride |= chan->config.frm_dly <<
+                       XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
+
+       if (xt->dir != DMA_MEM_TO_DEV) {
+               if (chan->ext_addr) {
+                       hw->buf_addr = lower_32_bits(xt->dst_start);
+                       hw->buf_addr_msb = upper_32_bits(xt->dst_start);
+               } else {
+                       hw->buf_addr = xt->dst_start;
+               }
+       } else {
+               if (chan->ext_addr) {
+                       hw->buf_addr = lower_32_bits(xt->src_start);
+                       hw->buf_addr_msb = upper_32_bits(xt->src_start);
+               } else {
+                       hw->buf_addr = xt->src_start;
+               }
+       }
+
+       /* Insert the segment into the descriptor segments list. */
+       list_add_tail(&segment->node, &desc->segments);
+
+       prev = segment;
+
+       /* Link the last hardware descriptor with the first. */
+       segment = list_first_entry(&desc->segments,
+                                  struct xilinx_vdma_tx_segment, node);
+       desc->async_tx.phys = segment->phys;
+
+       return &desc->async_tx;
+
+error:
+       xilinx_dma_free_tx_descriptor(chan, desc);
+       return NULL;
+}
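
For the VDMA case, a client hands one frame per descriptor to the prep routine above through the generic interleaved API. A minimal sketch, assuming an S2MM (capture) channel and hypothetical frame geometry; the template is heap-allocated because struct dma_interleaved_template ends in a flexible array of data chunks:

/* Illustrative only; frame geometry and addresses are made up. */
#include <linux/dmaengine.h>
#include <linux/slab.h>

static struct dma_async_tx_descriptor *
my_prep_vdma_frame(struct dma_chan *chan, dma_addr_t frame_buf)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *txd;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->dir = DMA_DEV_TO_MEM;	/* S2MM: capture into memory */
	xt->dst_start = frame_buf;
	xt->numf = 480;			/* vsize: lines per frame */
	xt->frame_size = 1;		/* the prep above requires exactly 1 */
	xt->sgl[0].size = 640 * 2;	/* hsize: bytes per line */
	xt->sgl[0].icg = 0;		/* stride = size + icg */

	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);			/* prep copied everything it needs */
	return txd;
}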
+
+/**
+ * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: destination address
+ * @dma_src: source address
+ * @len: transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
+                       dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       struct xilinx_dma_tx_descriptor *desc;
+       struct xilinx_cdma_tx_segment *segment, *prev;
+       struct xilinx_cdma_desc_hw *hw;
+
+       if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+               return NULL;
+
+       desc = xilinx_dma_alloc_tx_descriptor(chan);
+       if (!desc)
+               return NULL;
+
+       dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+       desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+       /* Allocate the link descriptor from DMA pool */
+       segment = xilinx_cdma_alloc_tx_segment(chan);
+       if (!segment)
+               goto error;
+
+       hw = &segment->hw;
+       hw->control = len;
+       hw->src_addr = dma_src;
+       hw->dest_addr = dma_dst;
+       if (chan->ext_addr) {
+               hw->src_addr_msb = upper_32_bits(dma_src);
+               hw->dest_addr_msb = upper_32_bits(dma_dst);
+       }
+
+       /* Link the previous segment's next descriptor to this segment */
+       prev = list_last_entry(&desc->segments,
+                              struct xilinx_cdma_tx_segment, node);
+       prev->hw.next_desc = segment->phys;
+
+       /* Insert the segment into the descriptor segments list. */
+       list_add_tail(&segment->node, &desc->segments);
+
+       prev = segment;
+
+       /* Link the last hardware descriptor with the first. */
+       segment = list_first_entry(&desc->segments,
+                               struct xilinx_cdma_tx_segment, node);
+       desc->async_tx.phys = segment->phys;
+       prev->hw.next_desc = segment->phys;
+
+       return &desc->async_tx;
+
+error:
+       xilinx_dma_free_tx_descriptor(chan, desc);
+       return NULL;
+}
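
A CDMA channel is consumed through the standard memcpy capability registered in the probe below. A minimal client sketch, assuming the source and destination are already DMA-mapped; names are hypothetical and each call must respect the XILINX_DMA_MAX_TRANS_LEN limit checked above:

/* Illustrative only: one memory-to-memory copy on a CDMA channel. */
#include <linux/dmaengine.h>

static int my_cdma_copy(struct dma_chan *chan, dma_addr_t dst,
			dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* xilinx_cdma_prep_memcpy() rejects len == 0 or len > hw limit */
	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);
	return 0;
}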
+
+/**
+ * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
+       struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+       enum dma_transfer_direction direction, unsigned long flags,
+       void *context)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       struct xilinx_dma_tx_descriptor *desc;
+       struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
+       u32 *app_w = (u32 *)context;
+       struct scatterlist *sg;
+       size_t copy;
+       size_t sg_used;
+       unsigned int i;
+
+       if (!is_slave_direction(direction))
+               return NULL;
+
+       /* Allocate a transaction descriptor. */
+       desc = xilinx_dma_alloc_tx_descriptor(chan);
+       if (!desc)
+               return NULL;
+
+       dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+       desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+       /* Build transactions using information in the scatter gather list */
+       for_each_sg(sgl, sg, sg_len, i) {
+               sg_used = 0;
+
+               /* Loop until the entire scatterlist entry is used */
+               while (sg_used < sg_dma_len(sg)) {
+                       struct xilinx_axidma_desc_hw *hw;
+
+                       /* Get a free segment */
+                       segment = xilinx_axidma_alloc_tx_segment(chan);
+                       if (!segment)
+                               goto error;
+
+                       /*
+                        * Calculate the maximum number of bytes to transfer,
+                        * making sure it is less than the hw limit
+                        */
+                       copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+                                    XILINX_DMA_MAX_TRANS_LEN);
+                       hw = &segment->hw;
+
+                       /* Fill in the descriptor */
+                       xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
+                                         sg_used, 0);
+
+                       hw->control = copy;
+
+                       if (chan->direction == DMA_MEM_TO_DEV) {
+                               if (app_w)
+                                       memcpy(hw->app, app_w, sizeof(u32) *
+                                              XILINX_DMA_NUM_APP_WORDS);
+                       }
+
+                       if (prev)
+                               prev->hw.next_desc = segment->phys;
+
+                       prev = segment;
+                       sg_used += copy;
+
+                       /*
+                        * Insert the segment into the descriptor segments
+                        * list.
+                        */
+                       list_add_tail(&segment->node, &desc->segments);
+               }
+       }
+
+       segment = list_first_entry(&desc->segments,
+                                  struct xilinx_axidma_tx_segment, node);
+       desc->async_tx.phys = segment->phys;
+       prev->hw.next_desc = segment->phys;
+
+       /* For MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
+       if (chan->direction == DMA_MEM_TO_DEV) {
+               segment->hw.control |= XILINX_DMA_BD_SOP;
+               segment = list_last_entry(&desc->segments,
+                                         struct xilinx_axidma_tx_segment,
+                                         node);
+               segment->hw.control |= XILINX_DMA_BD_EOP;
+       }
+
+       return &desc->async_tx;
+
+error:
+       xilinx_dma_free_tx_descriptor(chan, desc);
+       return NULL;
+}
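
On the AXI DMA flavour, slave transfers come in through the scatter-gather prep above. A minimal client sketch for queuing one already-mapped buffer on an MM2S channel; the helper name is hypothetical and the caller retains ownership of the DMA mapping:

/* Illustrative only: queue one pre-mapped buffer on a DMA_MEM_TO_DEV channel. */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int my_send_buf(struct dma_chan *chan, dma_addr_t dma_buf, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct scatterlist sg;

	/* One pre-mapped segment; the prep only reads sg_dma_address/len */
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = dma_buf;
	sg_dma_len(&sg) = len;

	txd = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}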
+
+/**
+ * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: length of individual cyclic buffer
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
+       struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
+       size_t period_len, enum dma_transfer_direction direction,
+       unsigned long flags)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       struct xilinx_dma_tx_descriptor *desc;
+       struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
+       size_t copy, sg_used;
+       unsigned int num_periods;
+       int i;
+       u32 reg;
+
+       if (!period_len)
+               return NULL;
+
+       num_periods = buf_len / period_len;
+
+       if (!num_periods)
+               return NULL;
+
+       if (!is_slave_direction(direction))
+               return NULL;
+
+       /* Allocate a transaction descriptor. */
+       desc = xilinx_dma_alloc_tx_descriptor(chan);
+       if (!desc)
+               return NULL;
+
+       chan->direction = direction;
+       dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+       desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+       for (i = 0; i < num_periods; ++i) {
+               sg_used = 0;
+
+               while (sg_used < period_len) {
+                       struct xilinx_axidma_desc_hw *hw;
+
+                       /* Get a free segment */
+                       segment = xilinx_axidma_alloc_tx_segment(chan);
+                       if (!segment)
+                               goto error;
+
+                       /*
+                        * Calculate the maximum number of bytes to transfer,
+                        * making sure it is less than the hw limit
+                        */
+                       copy = min_t(size_t, period_len - sg_used,
+                                    XILINX_DMA_MAX_TRANS_LEN);
+                       hw = &segment->hw;
+                       xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
+                                         period_len * i);
+                       hw->control = copy;
+
+                       if (prev)
+                               prev->hw.next_desc = segment->phys;
+
+                       prev = segment;
+                       sg_used += copy;
+
+                       /*
+                        * Insert the segment into the descriptor segments
+                        * list.
+                        */
+                       list_add_tail(&segment->node, &desc->segments);
+               }
+       }
+
+       head_segment = list_first_entry(&desc->segments,
+                                  struct xilinx_axidma_tx_segment, node);
+       desc->async_tx.phys = head_segment->phys;
+
+       desc->cyclic = true;
+       reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+       reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+
+       segment = list_last_entry(&desc->segments,
+                                 struct xilinx_axidma_tx_segment,
+                                 node);
+       segment->hw.next_desc = (u32) head_segment->phys;
+
+       /* For MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
+       if (direction == DMA_MEM_TO_DEV) {
+               head_segment->hw.control |= XILINX_DMA_BD_SOP;
+               segment->hw.control |= XILINX_DMA_BD_EOP;
+       }
+
+       return &desc->async_tx;
+
+error:
+       xilinx_dma_free_tx_descriptor(chan, desc);
+       return NULL;
+}
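
Cyclic transfers are the streaming-style use of the AXI DMA channel: in the start-transfer path above the driver points TAILDESC at a reserved cyclic segment, so completions repeat once per period until the client calls dmaengine_terminate_all(), which lands in xilinx_dma_terminate_all() further down. A minimal client sketch with a hypothetical per-period callback:

/* Illustrative only: capture into a ring of periods on an S2MM channel. */
#include <linux/dmaengine.h>

static int my_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			      size_t buf_len, size_t period_len,
			      dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	/* Called once per completed period until the channel is terminated */
	txd->callback = period_done;
	txd->callback_param = arg;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}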
+
+/**
+ * xilinx_dma_prep_interleaved - prepare a descriptor for a
+ *     DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dma_prep_interleaved(struct dma_chan *dchan,
+                                struct dma_interleaved_template *xt,
+                                unsigned long flags)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       struct xilinx_dma_tx_descriptor *desc;
+       struct xilinx_axidma_tx_segment *segment;
+       struct xilinx_axidma_desc_hw *hw;
+
+       if (!is_slave_direction(xt->dir))
+               return NULL;
+
+       if (!xt->numf || !xt->sgl[0].size)
+               return NULL;
+
+       if (xt->frame_size != 1)
+               return NULL;
+
+       /* Allocate a transaction descriptor. */
+       desc = xilinx_dma_alloc_tx_descriptor(chan);
+       if (!desc)
+               return NULL;
+
+       chan->direction = xt->dir;
+       dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+       desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+       /* Get a free segment */
+       segment = xilinx_axidma_alloc_tx_segment(chan);
+       if (!segment)
+               goto error;
+
+       hw = &segment->hw;
+
+       /* Fill in the descriptor */
+       if (xt->dir != DMA_MEM_TO_DEV)
+               hw->buf_addr = xt->dst_start;
+       else
+               hw->buf_addr = xt->src_start;
+
+       hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
+       hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
+                           XILINX_DMA_BD_VSIZE_MASK;
+       hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
+                           XILINX_DMA_BD_STRIDE_MASK;
+       hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
+
+       /*
+        * Insert the segment into the descriptor segments
+        * list.
+        */
+       list_add_tail(&segment->node, &desc->segments);
+
+       segment = list_first_entry(&desc->segments,
+                                  struct xilinx_axidma_tx_segment, node);
+       desc->async_tx.phys = segment->phys;
+
+       /* For MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
+       if (xt->dir == DMA_MEM_TO_DEV) {
+               segment->hw.control |= XILINX_DMA_BD_SOP;
+               segment = list_last_entry(&desc->segments,
+                                         struct xilinx_axidma_tx_segment,
+                                         node);
+               segment->hw.control |= XILINX_DMA_BD_EOP;
+       }
+
+       return &desc->async_tx;
+
+error:
+       xilinx_dma_free_tx_descriptor(chan, desc);
+       return NULL;
+}
+
+/**
+ * xilinx_dma_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific DMA Channel pointer
+ */
+static int xilinx_dma_terminate_all(struct dma_chan *dchan)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       u32 reg;
+
+       if (chan->cyclic)
+               xilinx_dma_chan_reset(chan);
+
+       /* Halt the DMA engine */
+       xilinx_dma_halt(chan);
+
+       /* Remove and free all of the descriptors in the lists */
+       xilinx_dma_free_descriptors(chan);
+
+       if (chan->cyclic) {
+               reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+               reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+               dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+               chan->cyclic = false;
+       }
+
+       return 0;
+}
+
+/**
+ * xilinx_vdma_channel_set_config - Configure VDMA channel
+ * Run-time configuration for AXI VDMA, supports:
+ * . halt the channel
+ * . configure interrupt coalescing and inter-packet delay threshold
+ * . start/stop parking
+ * . enable genlock
+ *
+ * @dchan: DMA channel
+ * @cfg: VDMA device configuration pointer
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
+                                       struct xilinx_vdma_config *cfg)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       u32 dmacr;
+
+       if (cfg->reset)
+               return xilinx_dma_chan_reset(chan);
+
+       dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+       chan->config.frm_dly = cfg->frm_dly;
+       chan->config.park = cfg->park;
+
+       /* genlock settings */
+       chan->config.gen_lock = cfg->gen_lock;
+       chan->config.master = cfg->master;
+
+       if (cfg->gen_lock && chan->genlock) {
+               dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+               dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
+       }
+
+       chan->config.frm_cnt_en = cfg->frm_cnt_en;
+       if (cfg->park)
+               chan->config.park_frm = cfg->park_frm;
+       else
+               chan->config.park_frm = -1;
+
+       chan->config.coalesc = cfg->coalesc;
+       chan->config.delay = cfg->delay;
+
+       if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+               dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
+               chan->config.coalesc = cfg->coalesc;
+       }
+
+       if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+               dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
+               chan->config.delay = cfg->delay;
+       }
+
+       /* FSync Source selection */
+       dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
+       dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
+
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
+
+       return 0;
+}
+EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
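
Unlike the generic dmaengine hooks, this configuration path is driver specific: a video client includes <linux/dma/xilinx_dma.h> and calls the exported helper directly. A minimal sketch that parks the channel on a single frame store; the frame index and coalescing value are hypothetical:

/* Illustrative only: park a VDMA channel on one frame buffer. */
#include <linux/dma/xilinx_dma.h>
#include <linux/dmaengine.h>

static int my_park_on_frame(struct dma_chan *chan, int frm)
{
	struct xilinx_vdma_config cfg = {};

	cfg.park = 1;		/* circular mode is cleared in start_transfer above */
	cfg.park_frm = frm;	/* frame store to park on */
	cfg.coalesc = 1;	/* interrupt after every frame */

	return xilinx_vdma_channel_set_config(chan, &cfg);
}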
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/**
+ * xilinx_dma_chan_remove - Per Channel remove function
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
+{
+       /* Disable all interrupts */
+       dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+                     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+       if (chan->irq > 0)
+               free_irq(chan->irq, chan);
+
+       tasklet_kill(&chan->tasklet);
+
+       list_del(&chan->common.device_node);
+}
+
+static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+                           struct clk **tx_clk, struct clk **rx_clk,
+                           struct clk **sg_clk, struct clk **tmp_clk)
+{
+       int err;
+
+       *tmp_clk = NULL;
+
+       *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+       if (IS_ERR(*axi_clk)) {
+               err = PTR_ERR(*axi_clk);
+               dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
+               return err;
+       }
+
+       *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+       if (IS_ERR(*tx_clk))
+               *tx_clk = NULL;
+
+       *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+       if (IS_ERR(*rx_clk))
+               *rx_clk = NULL;
+
+       *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
+       if (IS_ERR(*sg_clk))
+               *sg_clk = NULL;
+
+       err = clk_prepare_enable(*axi_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
+               return err;
+       }
+
+       err = clk_prepare_enable(*tx_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
+               goto err_disable_axiclk;
+       }
+
+       err = clk_prepare_enable(*rx_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
+               goto err_disable_txclk;
+       }
+
+       err = clk_prepare_enable(*sg_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
+               goto err_disable_rxclk;
+       }
+
+       return 0;
+
+err_disable_rxclk:
+       clk_disable_unprepare(*rx_clk);
+err_disable_txclk:
+       clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+       clk_disable_unprepare(*axi_clk);
+
+       return err;
+}
+
+static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+                           struct clk **dev_clk, struct clk **tmp_clk,
+                           struct clk **tmp1_clk, struct clk **tmp2_clk)
+{
+       int err;
+
+       *tmp_clk = NULL;
+       *tmp1_clk = NULL;
+       *tmp2_clk = NULL;
+
+       *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+       if (IS_ERR(*axi_clk)) {
+               err = PTR_ERR(*axi_clk);
+               dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
+               return err;
+       }
+
+       *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
+       if (IS_ERR(*dev_clk)) {
+               err = PTR_ERR(*dev_clk);
+               dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
+               return err;
+       }
+
+       err = clk_prepare_enable(*axi_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
+               return err;
+       }
+
+       err = clk_prepare_enable(*dev_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
+               goto err_disable_axiclk;
+       }
+
+       return 0;
+
+err_disable_axiclk:
+       clk_disable_unprepare(*axi_clk);
+
+       return err;
+}
+
+static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+                           struct clk **tx_clk, struct clk **txs_clk,
+                           struct clk **rx_clk, struct clk **rxs_clk)
+{
+       int err;
+
+       *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+       if (IS_ERR(*axi_clk)) {
+               err = PTR_ERR(*axi_clk);
+               dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
+               return err;
+       }
+
+       *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+       if (IS_ERR(*tx_clk))
+               *tx_clk = NULL;
+
+       *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
+       if (IS_ERR(*txs_clk))
+               *txs_clk = NULL;
+
+       *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+       if (IS_ERR(*rx_clk))
+               *rx_clk = NULL;
+
+       *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
+       if (IS_ERR(*rxs_clk))
+               *rxs_clk = NULL;
+
+       err = clk_prepare_enable(*axi_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
+               return err;
+       }
+
+       err = clk_prepare_enable(*tx_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
+               goto err_disable_axiclk;
+       }
+
+       err = clk_prepare_enable(*txs_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
+               goto err_disable_txclk;
+       }
+
+       err = clk_prepare_enable(*rx_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
+               goto err_disable_txsclk;
+       }
+
+       err = clk_prepare_enable(*rxs_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
+               goto err_disable_rxclk;
+       }
+
+       return 0;
+
+err_disable_rxclk:
+       clk_disable_unprepare(*rx_clk);
+err_disable_txsclk:
+       clk_disable_unprepare(*txs_clk);
+err_disable_txclk:
+       clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+       clk_disable_unprepare(*axi_clk);
+
+       return err;
+}
+
+static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
+{
+       clk_disable_unprepare(xdev->rxs_clk);
+       clk_disable_unprepare(xdev->rx_clk);
+       clk_disable_unprepare(xdev->txs_clk);
+       clk_disable_unprepare(xdev->tx_clk);
+       clk_disable_unprepare(xdev->axi_clk);
+}
+
+/**
+ * xilinx_dma_chan_probe - Per Channel Probing
+ * It gets channel features from the device tree entry and
+ * initializes special channel handling routines
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ * @chan_id: DMA Channel id
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+                                 struct device_node *node, int chan_id)
+{
+       struct xilinx_dma_chan *chan;
+       bool has_dre = false;
+       u32 value, width;
+       int err;
+
+       /* Allocate and initialize the channel structure */
+       chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+       if (!chan)
+               return -ENOMEM;
+
+       chan->dev = xdev->dev;
+       chan->xdev = xdev;
+       chan->has_sg = xdev->has_sg;
+       chan->desc_pendingcount = 0x0;
+       chan->ext_addr = xdev->ext_addr;
+
+       spin_lock_init(&chan->lock);
+       INIT_LIST_HEAD(&chan->pending_list);
+       INIT_LIST_HEAD(&chan->done_list);
+       INIT_LIST_HEAD(&chan->active_list);
+
+       /* Retrieve the channel properties from the device tree */
+       has_dre = of_property_read_bool(node, "xlnx,include-dre");
+
+       chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
+
+       err = of_property_read_u32(node, "xlnx,datawidth", &value);
+       if (err) {
+               dev_err(xdev->dev, "missing xlnx,datawidth property\n");
+               return err;
+       }
+       width = value >> 3; /* Convert bits to bytes */
+
+       /* If data width is greater than 8 bytes, DRE is not in hw */
+       if (width > 8)
+               has_dre = false;
+
+       if (!has_dre)
+               xdev->common.copy_align = fls(width - 1);
+
+       if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
+           of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
+           of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
+               chan->direction = DMA_MEM_TO_DEV;
+               chan->id = chan_id;
+               chan->tdest = chan_id;
+
+               chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
+               if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+                       chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+
+                       if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+                           xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
+                               chan->flush_on_fsync = true;
+               }
+       } else if (of_device_is_compatible(node,
+                                          "xlnx,axi-vdma-s2mm-channel") ||
+                  of_device_is_compatible(node,
+                                          "xlnx,axi-dma-s2mm-channel")) {
+               chan->direction = DMA_DEV_TO_MEM;
+               chan->id = chan_id;
+               chan->tdest = chan_id - xdev->nr_channels;
+
+               chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+               if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+                       chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+
+                       if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+                           xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
+                               chan->flush_on_fsync = true;
+               }
+       } else {
+               dev_err(xdev->dev, "Invalid channel compatible node\n");
+               return -EINVAL;
+       }
+
+       /* Request the interrupt */
+       chan->irq = irq_of_parse_and_map(node, 0);
+       err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
+                         "xilinx-dma-controller", chan);
+       if (err) {
+               dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
+               return err;
+       }
+
+       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+               chan->start_transfer = xilinx_dma_start_transfer;
+       else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+               chan->start_transfer = xilinx_cdma_start_transfer;
+       else
+               chan->start_transfer = xilinx_vdma_start_transfer;
+
+       /* Initialize the tasklet */
+       tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
+                       (unsigned long)chan);
+
+       /*
+        * Initialize the DMA channel and add it to the DMA engine channels
+        * list.
+        */
+       chan->common.device = &xdev->common;
+
+       list_add_tail(&chan->common.device_node, &xdev->common.channels);
+       xdev->chan[chan->id] = chan;
+
+       /* Reset the channel */
+       err = xilinx_dma_chan_reset(chan);
+       if (err < 0) {
+               dev_err(xdev->dev, "Reset channel failed\n");
+               return err;
+       }
+
+       return 0;
+}
+
+/**
+ * xilinx_dma_child_probe - Per child node probe
+ * It gets the number of dma-channels per child node from
+ * the device tree and initializes all the channels.
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: 0 always.
+ */
+static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
+                                   struct device_node *node)
+{
+       int ret, i;
+       u32 nr_channels = 1;
+
+       ret = of_property_read_u32(node, "dma-channels", &nr_channels);
+       if ((ret < 0) && xdev->mcdma)
+               dev_warn(xdev->dev, "missing dma-channels property\n");
+
+       for (i = 0; i < nr_channels; i++)
+               xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
+
+       xdev->nr_channels += nr_channels;
+
+       return 0;
+}
+
+/**
+ * of_dma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+                                               struct of_dma *ofdma)
+{
+       struct xilinx_dma_device *xdev = ofdma->of_dma_data;
+       int chan_id = dma_spec->args[0];
+
+       if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
+               return NULL;
+
+       return dma_get_slave_channel(&xdev->chan[chan_id]->common);
+}
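
The first cell of the consumer's dma-specifier is the channel index validated above, so a consumer node references the controller with that cell and requests the channel by its dma-names entry. A minimal consumer-side sketch; the controller label and the "axidma0" name are hypothetical:

/* Illustrative only: request the channel routed by of_dma_xilinx_xlate(). */
#include <linux/dmaengine.h>
#include <linux/err.h>

static struct dma_chan *my_get_chan(struct device *dev)
{
	/* Matches a consumer node with, e.g.:
	 *   dmas = <&axi_dma_0 0>;       first cell = channel id
	 *   dma-names = "axidma0";
	 */
	struct dma_chan *chan = dma_request_chan(dev, "axidma0");

	return IS_ERR(chan) ? NULL : chan;
}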
+
+static const struct xilinx_dma_config axidma_config = {
+       .dmatype = XDMA_TYPE_AXIDMA,
+       .clk_init = axidma_clk_init,
+};
+
+static const struct xilinx_dma_config axicdma_config = {
+       .dmatype = XDMA_TYPE_CDMA,
+       .clk_init = axicdma_clk_init,
+};
+
+static const struct xilinx_dma_config axivdma_config = {
+       .dmatype = XDMA_TYPE_VDMA,
+       .clk_init = axivdma_clk_init,
+};
+
+static const struct of_device_id xilinx_dma_of_ids[] = {
+       { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
+       { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
+       { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
+       {}
+};
+MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
+
+/**
+ * xilinx_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_probe(struct platform_device *pdev)
+{
+       int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
+                       struct clk **, struct clk **, struct clk **)
+                                       = axivdma_clk_init;
+       struct device_node *node = pdev->dev.of_node;
+       struct xilinx_dma_device *xdev;
+       struct device_node *child, *np = pdev->dev.of_node;
+       struct resource *io;
+       u32 num_frames, addr_width;
+       int i, err;
+
+       /* Allocate and initialize the DMA engine structure */
+       xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+       if (!xdev)
+               return -ENOMEM;
+
+       xdev->dev = &pdev->dev;
+       if (np) {
+               const struct of_device_id *match;
+
+               match = of_match_node(xilinx_dma_of_ids, np);
+               if (match && match->data) {
+                       xdev->dma_config = match->data;
+                       clk_init = xdev->dma_config->clk_init;
+               }
+       }
+
+       err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
+                      &xdev->rx_clk, &xdev->rxs_clk);
+       if (err)
+               return err;
+
+       /* Request and map I/O memory */
+       io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+       if (IS_ERR(xdev->regs))
+               return PTR_ERR(xdev->regs);
+
+       /* Retrieve the DMA engine properties from the device tree */
+       xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
+       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+               xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+
+       if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+               err = of_property_read_u32(node, "xlnx,num-fstores",
+                                          &num_frames);
+               if (err < 0) {
+                       dev_err(xdev->dev,
+                               "missing xlnx,num-fstores property\n");
+                       return err;
+               }
+
+               err = of_property_read_u32(node, "xlnx,flush-fsync",
+                                          &xdev->flush_on_fsync);
+               if (err < 0)
+                       dev_warn(xdev->dev,
+                                "missing xlnx,flush-fsync property\n");
+       }
+
+       err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
+       if (err < 0)
+               dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
+
+       if (addr_width > 32)
+               xdev->ext_addr = true;
+       else
+               xdev->ext_addr = false;
+
+       /* Set the dma mask bits */
+       dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
+
+       /* Initialize the DMA engine */
+       xdev->common.dev = &pdev->dev;
+
+       INIT_LIST_HEAD(&xdev->common.channels);
+       if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
+               dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+               dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+       }
+
+       xdev->common.device_alloc_chan_resources =
+                               xilinx_dma_alloc_chan_resources;
+       xdev->common.device_free_chan_resources =
+                               xilinx_dma_free_chan_resources;
+       xdev->common.device_terminate_all = xilinx_dma_terminate_all;
+       xdev->common.device_tx_status = xilinx_dma_tx_status;
+       xdev->common.device_issue_pending = xilinx_dma_issue_pending;
+       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+               dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
+               xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
+               xdev->common.device_prep_dma_cyclic =
+                                         xilinx_dma_prep_dma_cyclic;
+               xdev->common.device_prep_interleaved_dma =
+                                       xilinx_dma_prep_interleaved;
+               /* Residue calculation is supported only by AXI DMA */
+               xdev->common.residue_granularity =
+                                         DMA_RESIDUE_GRANULARITY_SEGMENT;
+       } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+               dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+               xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+       } else {
+               xdev->common.device_prep_interleaved_dma =
+                               xilinx_vdma_dma_prep_interleaved;
+       }
+
+       platform_set_drvdata(pdev, xdev);
+
+       /* Initialize the channels */
+       for_each_child_of_node(node, child) {
+               err = xilinx_dma_child_probe(xdev, child);
+               if (err < 0)
+                       goto disable_clks;
+       }
+
+       if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+               for (i = 0; i < xdev->nr_channels; i++)
+                       if (xdev->chan[i])
+                               xdev->chan[i]->num_frms = num_frames;
+       }
+
+       /* Register the DMA engine with the core */
+       dma_async_device_register(&xdev->common);
+
+       err = of_dma_controller_register(node, of_dma_xilinx_xlate,
+                                        xdev);
+       if (err < 0) {
+               dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+               dma_async_device_unregister(&xdev->common);
+               goto error;
+       }
+
+       dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
+
+       return 0;
+
+disable_clks:
+       xdma_disable_allclks(xdev);
+error:
+       for (i = 0; i < xdev->nr_channels; i++)
+               if (xdev->chan[i])
+                       xilinx_dma_chan_remove(xdev->chan[i]);
+
+       return err;
+}
+
+/**
+ * xilinx_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int xilinx_dma_remove(struct platform_device *pdev)
+{
+       struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
+       int i;
+
+       of_dma_controller_free(pdev->dev.of_node);
+
+       dma_async_device_unregister(&xdev->common);
+
+       for (i = 0; i < xdev->nr_channels; i++)
+               if (xdev->chan[i])
+                       xilinx_dma_chan_remove(xdev->chan[i]);
+
+       xdma_disable_allclks(xdev);
+
+       return 0;
+}
+
+static struct platform_driver xilinx_vdma_driver = {
+       .driver = {
+               .name = "xilinx-vdma",
+               .of_match_table = xilinx_dma_of_ids,
+       },
+       .probe = xilinx_dma_probe,
+       .remove = xilinx_dma_remove,
+};
+
+module_platform_driver(xilinx_vdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx VDMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
deleted file mode 100644 (file)
index df91185..0000000
+++ /dev/null
@@ -1,2310 +0,0 @@
-/*
- * DMA driver for Xilinx Video DMA Engine
- *
- * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
- *
- * Based on the Freescale DMA driver.
- *
- * Description:
- * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
- * core that provides high-bandwidth direct memory access between memory
- * and AXI4-Stream type video target peripherals. The core provides efficient
- * two dimensional DMA operations with independent asynchronous read (S2MM)
- * and write (MM2S) channel operation. It can be configured to have either
- * one channel or two channels. If configured as two channels, one is to
- * transmit to the video device (MM2S) and another is to receive from the
- * video device (S2MM). Initialization, status, interrupt and management
- * registers are accessed through an AXI4-Lite slave interface.
- *
- * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
- * provides high-bandwidth one dimensional direct memory access between memory
- * and AXI4-Stream target peripherals. It supports one receive and one
- * transmit channel, both of them optional at synthesis time.
- *
- * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory
- * Access (DMA) between a memory-mapped source address and a memory-mapped
- * destination address.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- */
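As a rough orientation only: a client of any of these three variants goes through the generic dmaengine API rather than calling into this file directly. The sketch below is a hypothetical client-side example; the channel name "vdma0" and the 640x480 frame geometry are assumptions for illustration, not values defined by this driver.

/* Hypothetical client sketch (not part of this driver), assuming
 * <linux/dmaengine.h>: request a VDMA channel by its DT name and submit
 * one video frame as an interleaved transfer. */
static int example_submit_frame(struct device *dev, dma_addr_t frame_buf)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_interleaved_template *xt;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "vdma0");	/* channel name is an assumption */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt) {
		ret = -ENOMEM;
		goto err_release;
	}

	xt->dir = DMA_MEM_TO_DEV;	/* MM2S: read the frame from memory */
	xt->src_start = frame_buf;
	xt->numf = 480;			/* lines per frame (example value) */
	xt->frame_size = 1;		/* this driver accepts exactly one chunk */
	xt->sgl[0].size = 640 * 2;	/* bytes per line (example value) */
	xt->sgl[0].icg = 0;		/* no padding between lines */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_CTRL_ACK);
	kfree(xt);
	if (!tx) {
		ret = -EINVAL;
		goto err_release;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;	/* a real client would keep 'chan' for further frames */

err_release:
	dma_release_channel(chan);
	return ret;
}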
-
-#include <linux/bitops.h>
-#include <linux/dmapool.h>
-#include <linux/dma/xilinx_dma.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_dma.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-
-#include "../dmaengine.h"
-
-/* Register/Descriptor Offsets */
-#define XILINX_DMA_MM2S_CTRL_OFFSET            0x0000
-#define XILINX_DMA_S2MM_CTRL_OFFSET            0x0030
-#define XILINX_VDMA_MM2S_DESC_OFFSET           0x0050
-#define XILINX_VDMA_S2MM_DESC_OFFSET           0x00a0
-
-/* Control Registers */
-#define XILINX_DMA_REG_DMACR                   0x0000
-#define XILINX_DMA_DMACR_DELAY_MAX             0xff
-#define XILINX_DMA_DMACR_DELAY_SHIFT           24
-#define XILINX_DMA_DMACR_FRAME_COUNT_MAX       0xff
-#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT     16
-#define XILINX_DMA_DMACR_ERR_IRQ               BIT(14)
-#define XILINX_DMA_DMACR_DLY_CNT_IRQ           BIT(13)
-#define XILINX_DMA_DMACR_FRM_CNT_IRQ           BIT(12)
-#define XILINX_DMA_DMACR_MASTER_SHIFT          8
-#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT        5
-#define XILINX_DMA_DMACR_FRAMECNT_EN           BIT(4)
-#define XILINX_DMA_DMACR_GENLOCK_EN            BIT(3)
-#define XILINX_DMA_DMACR_RESET                 BIT(2)
-#define XILINX_DMA_DMACR_CIRC_EN               BIT(1)
-#define XILINX_DMA_DMACR_RUNSTOP               BIT(0)
-#define XILINX_DMA_DMACR_FSYNCSRC_MASK         GENMASK(6, 5)
-
-#define XILINX_DMA_REG_DMASR                   0x0004
-#define XILINX_DMA_DMASR_EOL_LATE_ERR          BIT(15)
-#define XILINX_DMA_DMASR_ERR_IRQ               BIT(14)
-#define XILINX_DMA_DMASR_DLY_CNT_IRQ           BIT(13)
-#define XILINX_DMA_DMASR_FRM_CNT_IRQ           BIT(12)
-#define XILINX_DMA_DMASR_SOF_LATE_ERR          BIT(11)
-#define XILINX_DMA_DMASR_SG_DEC_ERR            BIT(10)
-#define XILINX_DMA_DMASR_SG_SLV_ERR            BIT(9)
-#define XILINX_DMA_DMASR_EOF_EARLY_ERR         BIT(8)
-#define XILINX_DMA_DMASR_SOF_EARLY_ERR         BIT(7)
-#define XILINX_DMA_DMASR_DMA_DEC_ERR           BIT(6)
-#define XILINX_DMA_DMASR_DMA_SLAVE_ERR         BIT(5)
-#define XILINX_DMA_DMASR_DMA_INT_ERR           BIT(4)
-#define XILINX_DMA_DMASR_IDLE                  BIT(1)
-#define XILINX_DMA_DMASR_HALTED                BIT(0)
-#define XILINX_DMA_DMASR_DELAY_MASK            GENMASK(31, 24)
-#define XILINX_DMA_DMASR_FRAME_COUNT_MASK      GENMASK(23, 16)
-
-#define XILINX_DMA_REG_CURDESC                 0x0008
-#define XILINX_DMA_REG_TAILDESC                0x0010
-#define XILINX_DMA_REG_REG_INDEX               0x0014
-#define XILINX_DMA_REG_FRMSTORE                0x0018
-#define XILINX_DMA_REG_THRESHOLD               0x001c
-#define XILINX_DMA_REG_FRMPTR_STS              0x0024
-#define XILINX_DMA_REG_PARK_PTR                0x0028
-#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT       8
-#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT       0
-#define XILINX_DMA_REG_VDMA_VERSION            0x002c
-
-/* Register Direct Mode Registers */
-#define XILINX_DMA_REG_VSIZE                   0x0000
-#define XILINX_DMA_REG_HSIZE                   0x0004
-
-#define XILINX_DMA_REG_FRMDLY_STRIDE           0x0008
-#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT  24
-#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT  0
-
-#define XILINX_VDMA_REG_START_ADDRESS(n)       (0x000c + 4 * (n))
-#define XILINX_VDMA_REG_START_ADDRESS_64(n)    (0x000c + 8 * (n))
-
-/* HW specific definitions */
-#define XILINX_DMA_MAX_CHANS_PER_DEVICE        0x2
-
-#define XILINX_DMA_DMAXR_ALL_IRQ_MASK  \
-               (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
-                XILINX_DMA_DMASR_DLY_CNT_IRQ | \
-                XILINX_DMA_DMASR_ERR_IRQ)
-
-#define XILINX_DMA_DMASR_ALL_ERR_MASK  \
-               (XILINX_DMA_DMASR_EOL_LATE_ERR | \
-                XILINX_DMA_DMASR_SOF_LATE_ERR | \
-                XILINX_DMA_DMASR_SG_DEC_ERR | \
-                XILINX_DMA_DMASR_SG_SLV_ERR | \
-                XILINX_DMA_DMASR_EOF_EARLY_ERR | \
-                XILINX_DMA_DMASR_SOF_EARLY_ERR | \
-                XILINX_DMA_DMASR_DMA_DEC_ERR | \
-                XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
-                XILINX_DMA_DMASR_DMA_INT_ERR)
-
-/*
- * Recoverable errors are DMA Internal error, SOF Early, EOF Early
- * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
- * is enabled in the h/w system.
- */
-#define XILINX_DMA_DMASR_ERR_RECOVER_MASK      \
-               (XILINX_DMA_DMASR_SOF_LATE_ERR | \
-                XILINX_DMA_DMASR_EOF_EARLY_ERR | \
-                XILINX_DMA_DMASR_SOF_EARLY_ERR | \
-                XILINX_DMA_DMASR_DMA_INT_ERR)
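A small worked example of how this split is meant to be read, using a made-up status value (the real handling is in xilinx_dma_irq_handler further down in this file):

/* Hypothetical status: SOF-early error (bit 7) plus SG decode error (bit 10). */
u32 errors = XILINX_DMA_DMASR_SOF_EARLY_ERR | XILINX_DMA_DMASR_SG_DEC_ERR;

/* Only the recoverable part may be written back to DMASR to clear it. */
u32 clearable = errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK;	/* bit 7 only */

/* Whatever falls outside the recoverable mask is fatal for the channel,
 * even with C_FLUSH_ON_FSYNC enabled in the hardware design. */
bool fatal = errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK;	/* bit 10 -> true */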
-
-/* Axi VDMA Flush on Fsync bits */
-#define XILINX_DMA_FLUSH_S2MM          3
-#define XILINX_DMA_FLUSH_MM2S          2
-#define XILINX_DMA_FLUSH_BOTH          1
-
-/* Delay loop counter to prevent hardware failure */
-#define XILINX_DMA_LOOP_COUNT          1000000
-
-/* AXI DMA Specific Registers/Offsets */
-#define XILINX_DMA_REG_SRCDSTADDR      0x18
-#define XILINX_DMA_REG_BTT             0x28
-
-/* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN       GENMASK(22, 0)
-#define XILINX_DMA_CR_COALESCE_MAX     GENMASK(23, 16)
-#define XILINX_DMA_CR_COALESCE_SHIFT   16
-#define XILINX_DMA_BD_SOP              BIT(27)
-#define XILINX_DMA_BD_EOP              BIT(26)
-#define XILINX_DMA_COALESCE_MAX                255
-#define XILINX_DMA_NUM_APP_WORDS       5
-
-/* AXI CDMA Specific Registers/Offsets */
-#define XILINX_CDMA_REG_SRCADDR                0x18
-#define XILINX_CDMA_REG_DSTADDR                0x20
-
-/* AXI CDMA Specific Masks */
-#define XILINX_CDMA_CR_SGMODE          BIT(3)
-
-/**
- * struct xilinx_vdma_desc_hw - Hardware Descriptor
- * @next_desc: Next Descriptor Pointer @0x00
- * @pad1: Reserved @0x04
- * @buf_addr: Buffer address @0x08
- * @buf_addr_msb: MSB of Buffer address @0x0C
- * @vsize: Vertical Size @0x10
- * @hsize: Horizontal Size @0x14
- * @stride: Number of bytes between the first
- *         pixels of each horizontal line @0x18
- */
-struct xilinx_vdma_desc_hw {
-       u32 next_desc;
-       u32 pad1;
-       u32 buf_addr;
-       u32 buf_addr_msb;
-       u32 vsize;
-       u32 hsize;
-       u32 stride;
-} __aligned(64);
-
-/**
- * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
- * @next_desc: Next Descriptor Pointer @0x00
- * @pad1: Reserved @0x04
- * @buf_addr: Buffer address @0x08
- * @pad2: Reserved @0x0C
- * @pad3: Reserved @0x10
- * @pad4: Reserved @0x14
- * @control: Control field @0x18
- * @status: Status field @0x1C
- * @app: APP Fields @0x20 - 0x30
- */
-struct xilinx_axidma_desc_hw {
-       u32 next_desc;
-       u32 pad1;
-       u32 buf_addr;
-       u32 pad2;
-       u32 pad3;
-       u32 pad4;
-       u32 control;
-       u32 status;
-       u32 app[XILINX_DMA_NUM_APP_WORDS];
-} __aligned(64);
-
-/**
- * struct xilinx_cdma_desc_hw - Hardware Descriptor
- * @next_desc: Next Descriptor Pointer @0x00
- * @pad1: Reserved @0x04
- * @src_addr: Source address @0x08
- * @pad2: Reserved @0x0C
- * @dest_addr: Destination address @0x10
- * @pad3: Reserved @0x14
- * @control: Control field @0x18
- * @status: Status field @0x1C
- */
-struct xilinx_cdma_desc_hw {
-       u32 next_desc;
-       u32 pad1;
-       u32 src_addr;
-       u32 pad2;
-       u32 dest_addr;
-       u32 pad3;
-       u32 control;
-       u32 status;
-} __aligned(64);
-
-/**
- * struct xilinx_vdma_tx_segment - Descriptor segment
- * @hw: Hardware descriptor
- * @node: Node in the descriptor segments list
- * @phys: Physical address of segment
- */
-struct xilinx_vdma_tx_segment {
-       struct xilinx_vdma_desc_hw hw;
-       struct list_head node;
-       dma_addr_t phys;
-} __aligned(64);
-
-/**
- * struct xilinx_axidma_tx_segment - Descriptor segment
- * @hw: Hardware descriptor
- * @node: Node in the descriptor segments list
- * @phys: Physical address of segment
- */
-struct xilinx_axidma_tx_segment {
-       struct xilinx_axidma_desc_hw hw;
-       struct list_head node;
-       dma_addr_t phys;
-} __aligned(64);
-
-/**
- * struct xilinx_cdma_tx_segment - Descriptor segment
- * @hw: Hardware descriptor
- * @node: Node in the descriptor segments list
- * @phys: Physical address of segment
- */
-struct xilinx_cdma_tx_segment {
-       struct xilinx_cdma_desc_hw hw;
-       struct list_head node;
-       dma_addr_t phys;
-} __aligned(64);
-
-/**
- * struct xilinx_dma_tx_descriptor - Per Transaction structure
- * @async_tx: Async transaction descriptor
- * @segments: TX segments list
- * @node: Node in the channel descriptors list
- */
-struct xilinx_dma_tx_descriptor {
-       struct dma_async_tx_descriptor async_tx;
-       struct list_head segments;
-       struct list_head node;
-};
-
-/**
- * struct xilinx_dma_chan - Driver specific DMA channel structure
- * @xdev: Driver specific device structure
- * @ctrl_offset: Control registers offset
- * @desc_offset: TX descriptor registers offset
- * @lock: Descriptor operation lock
- * @pending_list: Descriptors waiting to be submitted to hardware
- * @active_list: Descriptors submitted to hardware and in flight
- * @done_list: Completed descriptors awaiting cleanup
- * @common: DMA common channel
- * @desc_pool: Descriptors pool
- * @dev: The dma device
- * @irq: Channel IRQ
- * @id: Channel ID
- * @direction: Transfer direction
- * @num_frms: Number of frames
- * @has_sg: Support scatter transfers
- * @genlock: Support genlock mode
- * @err: Channel has errors
- * @tasklet: Cleanup work after irq
- * @config: Device configuration info
- * @flush_on_fsync: Flush on Frame sync
- * @desc_pendingcount: Descriptor pending count
- * @ext_addr: Indicates 64 bit addressing is supported by dma channel
- * @desc_submitcount: Descriptor h/w submitted count
- * @residue: Residue for AXI DMA
- * @seg_v: Statically allocated segments base
- * @start_transfer: IP-specific callback used to start the transfer (differs
- *		    between the DMA, CDMA and VDMA variants)
- */
-struct xilinx_dma_chan {
-       struct xilinx_dma_device *xdev;
-       u32 ctrl_offset;
-       u32 desc_offset;
-       spinlock_t lock;
-       struct list_head pending_list;
-       struct list_head active_list;
-       struct list_head done_list;
-       struct dma_chan common;
-       struct dma_pool *desc_pool;
-       struct device *dev;
-       int irq;
-       int id;
-       enum dma_transfer_direction direction;
-       int num_frms;
-       bool has_sg;
-       bool genlock;
-       bool err;
-       struct tasklet_struct tasklet;
-       struct xilinx_vdma_config config;
-       bool flush_on_fsync;
-       u32 desc_pendingcount;
-       bool ext_addr;
-       u32 desc_submitcount;
-       u32 residue;
-       struct xilinx_axidma_tx_segment *seg_v;
-       void (*start_transfer)(struct xilinx_dma_chan *chan);
-};
-
-struct xilinx_dma_config {
-       enum xdma_ip_type dmatype;
-       int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
-                       struct clk **tx_clk, struct clk **txs_clk,
-                       struct clk **rx_clk, struct clk **rxs_clk);
-};
-
-/**
- * struct xilinx_dma_device - DMA device structure
- * @regs: I/O mapped base address
- * @dev: Device Structure
- * @common: DMA device structure
- * @chan: Driver specific DMA channel
- * @has_sg: Specifies whether Scatter-Gather is present or not
- * @flush_on_fsync: Flush on frame sync
- * @ext_addr: Indicates 64 bit addressing is supported by dma device
- * @pdev: Platform device structure pointer
- * @dma_config: DMA config structure
- * @axi_clk: DMA AXI4-Lite interface clock
- * @tx_clk: DMA mm2s clock
- * @txs_clk: DMA mm2s stream clock
- * @rx_clk: DMA s2mm clock
- * @rxs_clk: DMA s2mm stream clock
- */
-struct xilinx_dma_device {
-       void __iomem *regs;
-       struct device *dev;
-       struct dma_device common;
-       struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
-       bool has_sg;
-       u32 flush_on_fsync;
-       bool ext_addr;
-       struct platform_device  *pdev;
-       const struct xilinx_dma_config *dma_config;
-       struct clk *axi_clk;
-       struct clk *tx_clk;
-       struct clk *txs_clk;
-       struct clk *rx_clk;
-       struct clk *rxs_clk;
-};
-
-/* Macros */
-#define to_xilinx_chan(chan) \
-       container_of(chan, struct xilinx_dma_chan, common)
-#define to_dma_tx_descriptor(tx) \
-       container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
-#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
-       readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
-                          cond, delay_us, timeout_us)
-
-/* IO accessors */
-static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
-{
-       return ioread32(chan->xdev->regs + reg);
-}
-
-static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
-{
-       iowrite32(value, chan->xdev->regs + reg);
-}
-
-static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
-                                  u32 value)
-{
-       dma_write(chan, chan->desc_offset + reg, value);
-}
-
-static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
-{
-       return dma_read(chan, chan->ctrl_offset + reg);
-}
-
-static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
-                                  u32 value)
-{
-       dma_write(chan, chan->ctrl_offset + reg, value);
-}
-
-static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
-                                u32 clr)
-{
-       dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
-}
-
-static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
-                                u32 set)
-{
-       dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
-}
-
-/**
- * vdma_desc_write_64 - 64-bit descriptor write
- * @chan: Driver specific VDMA channel
- * @reg: Register to write
- * @value_lsb: lower address of the descriptor.
- * @value_msb: upper address of the descriptor.
- *
- * Since the VDMA driver has to write to register offsets that are not
- * 64-bit aligned (e.g. 0x5c), the value is written as two separate 32-bit
- * writes instead of a single 64-bit register write.
- */
-static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
-                                     u32 value_lsb, u32 value_msb)
-{
-       /* Write the lsb 32 bits */
-       writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
-
-       /* Write the msb 32 bits */
-       writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
-}
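A minimal sketch of the intended call pattern, assuming a channel with ext_addr set; the frame index and the helper name are illustrative only (the real call sites are in xilinx_vdma_start_transfer below):

/* Frame-buffer register 1 of the 64-bit register map sits at
 * desc_offset + 0x14, which is not 8-byte aligned, hence the two 32-bit
 * writes rather than a single writeq(). */
static void example_program_frame_addr(struct xilinx_dma_chan *chan,
				       dma_addr_t buf)
{
	if (chan->ext_addr)
		vdma_desc_write_64(chan, XILINX_VDMA_REG_START_ADDRESS_64(1),
				   lower_32_bits(buf), upper_32_bits(buf));
	else
		vdma_desc_write(chan, XILINX_VDMA_REG_START_ADDRESS(1),
				lower_32_bits(buf));
}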
-
-/* -----------------------------------------------------------------------------
- * Descriptors and segments alloc and free
- */
-
-/**
- * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated segment on success and NULL on failure.
- */
-static struct xilinx_vdma_tx_segment *
-xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
-{
-       struct xilinx_vdma_tx_segment *segment;
-       dma_addr_t phys;
-
-       segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
-       if (!segment)
-               return NULL;
-
-       segment->phys = phys;
-
-       return segment;
-}
-
-/**
- * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated segment on success and NULL on failure.
- */
-static struct xilinx_cdma_tx_segment *
-xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
-{
-       struct xilinx_cdma_tx_segment *segment;
-       dma_addr_t phys;
-
-       segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
-       if (!segment)
-               return NULL;
-
-       memset(segment, 0, sizeof(*segment));
-       segment->phys = phys;
-
-       return segment;
-}
-
-/**
- * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated segment on success and NULL on failure.
- */
-static struct xilinx_axidma_tx_segment *
-xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
-{
-       struct xilinx_axidma_tx_segment *segment;
-       dma_addr_t phys;
-
-       segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
-       if (!segment)
-               return NULL;
-
-       memset(segment, 0, sizeof(*segment));
-       segment->phys = phys;
-
-       return segment;
-}
-
-/**
- * xilinx_dma_free_tx_segment - Free transaction segment
- * @chan: Driver specific DMA channel
- * @segment: DMA transaction segment
- */
-static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
-                               struct xilinx_axidma_tx_segment *segment)
-{
-       dma_pool_free(chan->desc_pool, segment, segment->phys);
-}
-
-/**
- * xilinx_cdma_free_tx_segment - Free transaction segment
- * @chan: Driver specific DMA channel
- * @segment: DMA transaction segment
- */
-static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
-                               struct xilinx_cdma_tx_segment *segment)
-{
-       dma_pool_free(chan->desc_pool, segment, segment->phys);
-}
-
-/**
- * xilinx_vdma_free_tx_segment - Free transaction segment
- * @chan: Driver specific DMA channel
- * @segment: DMA transaction segment
- */
-static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
-                                       struct xilinx_vdma_tx_segment *segment)
-{
-       dma_pool_free(chan->desc_pool, segment, segment->phys);
-}
-
-/**
- * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated descriptor on success and NULL on failure.
- */
-static struct xilinx_dma_tx_descriptor *
-xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
-{
-       struct xilinx_dma_tx_descriptor *desc;
-
-       desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-       if (!desc)
-               return NULL;
-
-       INIT_LIST_HEAD(&desc->segments);
-
-       return desc;
-}
-
-/**
- * xilinx_dma_free_tx_descriptor - Free transaction descriptor
- * @chan: Driver specific DMA channel
- * @desc: DMA transaction descriptor
- */
-static void
-xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
-                              struct xilinx_dma_tx_descriptor *desc)
-{
-       struct xilinx_vdma_tx_segment *segment, *next;
-       struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
-       struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
-
-       if (!desc)
-               return;
-
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
-               list_for_each_entry_safe(segment, next, &desc->segments, node) {
-                       list_del(&segment->node);
-                       xilinx_vdma_free_tx_segment(chan, segment);
-               }
-       } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
-               list_for_each_entry_safe(cdma_segment, cdma_next,
-                                        &desc->segments, node) {
-                       list_del(&cdma_segment->node);
-                       xilinx_cdma_free_tx_segment(chan, cdma_segment);
-               }
-       } else {
-               list_for_each_entry_safe(axidma_segment, axidma_next,
-                                        &desc->segments, node) {
-                       list_del(&axidma_segment->node);
-                       xilinx_dma_free_tx_segment(chan, axidma_segment);
-               }
-       }
-
-       kfree(desc);
-}
-
-/* Required functions */
-
-/**
- * xilinx_dma_free_desc_list - Free descriptors list
- * @chan: Driver specific DMA channel
- * @list: List to parse and delete the descriptor
- */
-static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
-                                       struct list_head *list)
-{
-       struct xilinx_dma_tx_descriptor *desc, *next;
-
-       list_for_each_entry_safe(desc, next, list, node) {
-               list_del(&desc->node);
-               xilinx_dma_free_tx_descriptor(chan, desc);
-       }
-}
-
-/**
- * xilinx_dma_free_descriptors - Free channel descriptors
- * @chan: Driver specific DMA channel
- */
-static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->lock, flags);
-
-       xilinx_dma_free_desc_list(chan, &chan->pending_list);
-       xilinx_dma_free_desc_list(chan, &chan->done_list);
-       xilinx_dma_free_desc_list(chan, &chan->active_list);
-
-       spin_unlock_irqrestore(&chan->lock, flags);
-}
-
-/**
- * xilinx_dma_free_chan_resources - Free channel resources
- * @dchan: DMA channel
- */
-static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
-{
-       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
-
-       dev_dbg(chan->dev, "Free all channel resources.\n");
-
-       xilinx_dma_free_descriptors(chan);
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
-               xilinx_dma_free_tx_segment(chan, chan->seg_v);
-       dma_pool_destroy(chan->desc_pool);
-       chan->desc_pool = NULL;
-}
-
-/**
- * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
- * @chan: Driver specific DMA channel
- */
-static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
-{
-       struct xilinx_dma_tx_descriptor *desc, *next;
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->lock, flags);
-
-       list_for_each_entry_safe(desc, next, &chan->done_list, node) {
-               dma_async_tx_callback callback;
-               void *callback_param;
-
-               /* Remove from the list of running transactions */
-               list_del(&desc->node);
-
-               /* Run the link descriptor callback function */
-               callback = desc->async_tx.callback;
-               callback_param = desc->async_tx.callback_param;
-               if (callback) {
-                       spin_unlock_irqrestore(&chan->lock, flags);
-                       callback(callback_param);
-                       spin_lock_irqsave(&chan->lock, flags);
-               }
-
-               /* Run any dependencies, then free the descriptor */
-               dma_run_dependencies(&desc->async_tx);
-               xilinx_dma_free_tx_descriptor(chan, desc);
-       }
-
-       spin_unlock_irqrestore(&chan->lock, flags);
-}
-
-/**
- * xilinx_dma_do_tasklet - Schedule completion tasklet
- * @data: Pointer to the Xilinx DMA channel structure
- */
-static void xilinx_dma_do_tasklet(unsigned long data)
-{
-       struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
-
-       xilinx_dma_chan_desc_cleanup(chan);
-}
-
-/**
- * xilinx_dma_alloc_chan_resources - Allocate channel resources
- * @dchan: DMA channel
- *
- * Return: '0' on success and failure value on error
- */
-static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
-{
-       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
-
-       /* Has this channel already been allocated? */
-       if (chan->desc_pool)
-               return 0;
-
-       /*
-        * We need the descriptor to be aligned to 64 bytes
-        * to meet the Xilinx VDMA specification requirement.
-        */
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-               chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
-                                  chan->dev,
-                                  sizeof(struct xilinx_axidma_tx_segment),
-                                  __alignof__(struct xilinx_axidma_tx_segment),
-                                  0);
-       } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
-               chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
-                                  chan->dev,
-                                  sizeof(struct xilinx_cdma_tx_segment),
-                                  __alignof__(struct xilinx_cdma_tx_segment),
-                                  0);
-       } else {
-               chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
-                                    chan->dev,
-                                    sizeof(struct xilinx_vdma_tx_segment),
-                                    __alignof__(struct xilinx_vdma_tx_segment),
-                                    0);
-       }
-
-       if (!chan->desc_pool) {
-               dev_err(chan->dev,
-                       "unable to allocate channel %d descriptor pool\n",
-                       chan->id);
-               return -ENOMEM;
-       }
-
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
-               /*
-                * For AXI DMA case after submitting a pending_list, keep
-                * an extra segment allocated so that the "next descriptor"
-                * pointer on the tail descriptor always points to a
-                * valid descriptor, even when paused after reaching taildesc.
-                * This way, it is possible to issue additional
-                * transfers without halting and restarting the channel.
-                */
-               chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
-
-       dma_cookie_init(dchan);
-
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-               /* For AXI DMA, resetting one channel also resets the
-                * other channel, so enable the interrupts here.
-                */
-               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
-                             XILINX_DMA_DMAXR_ALL_IRQ_MASK);
-       }
-
-       if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
-               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
-                            XILINX_CDMA_CR_SGMODE);
-
-       return 0;
-}
-
-/**
- * xilinx_dma_tx_status - Get DMA transaction status
- * @dchan: DMA channel
- * @cookie: Transaction identifier
- * @txstate: Transaction state
- *
- * Return: DMA transaction status
- */
-static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
-                                       dma_cookie_t cookie,
-                                       struct dma_tx_state *txstate)
-{
-       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
-       struct xilinx_dma_tx_descriptor *desc;
-       struct xilinx_axidma_tx_segment *segment;
-       struct xilinx_axidma_desc_hw *hw;
-       enum dma_status ret;
-       unsigned long flags;
-       u32 residue = 0;
-
-       ret = dma_cookie_status(dchan, cookie, txstate);
-       if (ret == DMA_COMPLETE || !txstate)
-               return ret;
-
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-               spin_lock_irqsave(&chan->lock, flags);
-
-               desc = list_last_entry(&chan->active_list,
-                                      struct xilinx_dma_tx_descriptor, node);
-               if (chan->has_sg) {
-                       list_for_each_entry(segment, &desc->segments, node) {
-                               hw = &segment->hw;
-                               residue += (hw->control - hw->status) &
-                                          XILINX_DMA_MAX_TRANS_LEN;
-                       }
-               }
-               spin_unlock_irqrestore(&chan->lock, flags);
-
-               chan->residue = residue;
-               dma_set_residue(txstate, chan->residue);
-       }
-
-       return ret;
-}
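A short worked example of the residue arithmetic above, with made-up register values; it assumes, as the loop does, that the low bits of hw->status report the bytes already transferred for that buffer descriptor:

u32 control = 0x1000;	/* bytes programmed into this segment      */
u32 status  = 0x0400;	/* bytes the engine reports as transferred */
u32 left    = (control - status) & XILINX_DMA_MAX_TRANS_LEN;	/* 0x0c00 */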
-
-/**
- * xilinx_dma_is_running - Check if DMA channel is running
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if running, '0' if not.
- */
-static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
-{
-       return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
-                XILINX_DMA_DMASR_HALTED) &&
-               (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
-                XILINX_DMA_DMACR_RUNSTOP);
-}
-
-/**
- * xilinx_dma_is_idle - Check if DMA channel is idle
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if idle, '0' if not.
- */
-static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
-{
-       return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
-               XILINX_DMA_DMASR_IDLE;
-}
-
-/**
- * xilinx_dma_halt - Halt DMA channel
- * @chan: Driver specific DMA channel
- */
-static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
-{
-       int err;
-       u32 val;
-
-       dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
-
-       /* Wait for the hardware to halt */
-       err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
-                                     (val & XILINX_DMA_DMASR_HALTED), 0,
-                                     XILINX_DMA_LOOP_COUNT);
-
-       if (err) {
-               dev_err(chan->dev, "Cannot stop channel %p: %x\n",
-                       chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
-               chan->err = true;
-       }
-}
-
-/**
- * xilinx_dma_start - Start DMA channel
- * @chan: Driver specific DMA channel
- */
-static void xilinx_dma_start(struct xilinx_dma_chan *chan)
-{
-       int err;
-       u32 val;
-
-       dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
-
-       /* Wait for the hardware to start */
-       err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
-                                     !(val & XILINX_DMA_DMASR_HALTED), 0,
-                                     XILINX_DMA_LOOP_COUNT);
-
-       if (err) {
-               dev_err(chan->dev, "Cannot start channel %p: %x\n",
-                       chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
-
-               chan->err = true;
-       }
-}
-
-/**
- * xilinx_vdma_start_transfer - Starts VDMA transfer
- * @chan: Driver specific channel struct pointer
- */
-static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
-{
-       struct xilinx_vdma_config *config = &chan->config;
-       struct xilinx_dma_tx_descriptor *desc, *tail_desc;
-       u32 reg;
-       struct xilinx_vdma_tx_segment *tail_segment;
-
-       /* This function was invoked with lock held */
-       if (chan->err)
-               return;
-
-       if (list_empty(&chan->pending_list))
-               return;
-
-       desc = list_first_entry(&chan->pending_list,
-                               struct xilinx_dma_tx_descriptor, node);
-       tail_desc = list_last_entry(&chan->pending_list,
-                                   struct xilinx_dma_tx_descriptor, node);
-
-       tail_segment = list_last_entry(&tail_desc->segments,
-                                      struct xilinx_vdma_tx_segment, node);
-
-       /* If it is SG mode and hardware is busy, cannot submit */
-       if (chan->has_sg && xilinx_dma_is_running(chan) &&
-           !xilinx_dma_is_idle(chan)) {
-               dev_dbg(chan->dev, "DMA controller still busy\n");
-               return;
-       }
-
-       /*
-        * If hardware is idle, then all descriptors on the running lists are
-        * done, start new transfers
-        */
-       if (chan->has_sg)
-               dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-                               desc->async_tx.phys);
-
-       /* Configure the hardware using info in the config structure */
-       reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
-
-       if (config->frm_cnt_en)
-               reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
-       else
-               reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
-
-       /* Configure the channel with the number of frame buffers to use */
-       dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
-                       chan->desc_pendingcount);
-
-       /*
-        * With SG, start with circular mode, so that BDs can be fetched.
-        * In direct register mode, if not parking, enable circular mode
-        */
-       if (chan->has_sg || !config->park)
-               reg |= XILINX_DMA_DMACR_CIRC_EN;
-
-       if (config->park)
-               reg &= ~XILINX_DMA_DMACR_CIRC_EN;
-
-       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
-
-       if (config->park && (config->park_frm >= 0) &&
-                       (config->park_frm < chan->num_frms)) {
-               if (chan->direction == DMA_MEM_TO_DEV)
-                       dma_write(chan, XILINX_DMA_REG_PARK_PTR,
-                               config->park_frm <<
-                                       XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
-               else
-                       dma_write(chan, XILINX_DMA_REG_PARK_PTR,
-                               config->park_frm <<
-                                       XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
-       }
-
-       /* Start the hardware */
-       xilinx_dma_start(chan);
-
-       if (chan->err)
-               return;
-
-       /* Start the transfer */
-       if (chan->has_sg) {
-               dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-                               tail_segment->phys);
-       } else {
-               struct xilinx_vdma_tx_segment *segment, *last = NULL;
-               int i = 0;
-
-               if (chan->desc_submitcount < chan->num_frms)
-                       i = chan->desc_submitcount;
-
-               list_for_each_entry(segment, &desc->segments, node) {
-                       if (chan->ext_addr)
-                               vdma_desc_write_64(chan,
-                                       XILINX_VDMA_REG_START_ADDRESS_64(i++),
-                                       segment->hw.buf_addr,
-                                       segment->hw.buf_addr_msb);
-                       else
-                               vdma_desc_write(chan,
-                                       XILINX_VDMA_REG_START_ADDRESS(i++),
-                                       segment->hw.buf_addr);
-
-                       last = segment;
-               }
-
-               if (!last)
-                       return;
-
-               /* HW expects these parameters to be the same for one transaction */
-               vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
-               vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
-                               last->hw.stride);
-               vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
-       }
-
-       if (!chan->has_sg) {
-               list_del(&desc->node);
-               list_add_tail(&desc->node, &chan->active_list);
-               chan->desc_submitcount++;
-               chan->desc_pendingcount--;
-               if (chan->desc_submitcount == chan->num_frms)
-                       chan->desc_submitcount = 0;
-       } else {
-               list_splice_tail_init(&chan->pending_list, &chan->active_list);
-               chan->desc_pendingcount = 0;
-       }
-}
-
-/**
- * xilinx_cdma_start_transfer - Starts cdma transfer
- * @chan: Driver specific channel struct pointer
- */
-static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
-{
-       struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
-       struct xilinx_cdma_tx_segment *tail_segment;
-       u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
-
-       if (chan->err)
-               return;
-
-       if (list_empty(&chan->pending_list))
-               return;
-
-       head_desc = list_first_entry(&chan->pending_list,
-                                    struct xilinx_dma_tx_descriptor, node);
-       tail_desc = list_last_entry(&chan->pending_list,
-                                   struct xilinx_dma_tx_descriptor, node);
-       tail_segment = list_last_entry(&tail_desc->segments,
-                                      struct xilinx_cdma_tx_segment, node);
-
-       if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
-               ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
-               ctrl_reg |= chan->desc_pendingcount <<
-                               XILINX_DMA_CR_COALESCE_SHIFT;
-               dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
-       }
-
-       if (chan->has_sg) {
-               dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-                          head_desc->async_tx.phys);
-
-               /* Update tail ptr register which will start the transfer */
-               dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-                              tail_segment->phys);
-       } else {
-               /* In simple mode */
-               struct xilinx_cdma_tx_segment *segment;
-               struct xilinx_cdma_desc_hw *hw;
-
-               segment = list_first_entry(&head_desc->segments,
-                                          struct xilinx_cdma_tx_segment,
-                                          node);
-
-               hw = &segment->hw;
-
-               dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
-               dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
-
-               /* Start the transfer */
-               dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-                               hw->control & XILINX_DMA_MAX_TRANS_LEN);
-       }
-
-       list_splice_tail_init(&chan->pending_list, &chan->active_list);
-       chan->desc_pendingcount = 0;
-}
-
-/**
- * xilinx_dma_start_transfer - Starts DMA transfer
- * @chan: Driver specific channel struct pointer
- */
-static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
-{
-       struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
-       struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
-       u32 reg;
-
-       if (chan->err)
-               return;
-
-       if (list_empty(&chan->pending_list))
-               return;
-
-       /* If it is SG mode and hardware is busy, cannot submit */
-       if (chan->has_sg && xilinx_dma_is_running(chan) &&
-           !xilinx_dma_is_idle(chan)) {
-               dev_dbg(chan->dev, "DMA controller still busy\n");
-               return;
-       }
-
-       head_desc = list_first_entry(&chan->pending_list,
-                                    struct xilinx_dma_tx_descriptor, node);
-       tail_desc = list_last_entry(&chan->pending_list,
-                                   struct xilinx_dma_tx_descriptor, node);
-       tail_segment = list_last_entry(&tail_desc->segments,
-                                      struct xilinx_axidma_tx_segment, node);
-
-       old_head = list_first_entry(&head_desc->segments,
-                               struct xilinx_axidma_tx_segment, node);
-       new_head = chan->seg_v;
-       /* Copy Buffer Descriptor fields. */
-       new_head->hw = old_head->hw;
-
-       /* Swap and save new reserve */
-       list_replace_init(&old_head->node, &new_head->node);
-       chan->seg_v = old_head;
-
-       tail_segment->hw.next_desc = chan->seg_v->phys;
-       head_desc->async_tx.phys = new_head->phys;
-
-       reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
-
-       if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
-               reg &= ~XILINX_DMA_CR_COALESCE_MAX;
-               reg |= chan->desc_pendingcount <<
-                                 XILINX_DMA_CR_COALESCE_SHIFT;
-               dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
-       }
-
-       if (chan->has_sg)
-               dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-                              head_desc->async_tx.phys);
-
-       xilinx_dma_start(chan);
-
-       if (chan->err)
-               return;
-
-       /* Start the transfer */
-       if (chan->has_sg) {
-               dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-                              tail_segment->phys);
-       } else {
-               struct xilinx_axidma_tx_segment *segment;
-               struct xilinx_axidma_desc_hw *hw;
-
-               segment = list_first_entry(&head_desc->segments,
-                                          struct xilinx_axidma_tx_segment,
-                                          node);
-               hw = &segment->hw;
-
-               dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
-
-               /* Start the transfer */
-               dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-                              hw->control & XILINX_DMA_MAX_TRANS_LEN);
-       }
-
-       list_splice_tail_init(&chan->pending_list, &chan->active_list);
-       chan->desc_pendingcount = 0;
-}
-
-/**
- * xilinx_dma_issue_pending - Issue pending transactions
- * @dchan: DMA channel
- */
-static void xilinx_dma_issue_pending(struct dma_chan *dchan)
-{
-       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->lock, flags);
-       chan->start_transfer(chan);
-       spin_unlock_irqrestore(&chan->lock, flags);
-}
-
-/**
- * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
- * @chan: xilinx DMA channel
- *
- * CONTEXT: hardirq
- */
-static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
-{
-       struct xilinx_dma_tx_descriptor *desc, *next;
-
-       /* This function was invoked with lock held */
-       if (list_empty(&chan->active_list))
-               return;
-
-       list_for_each_entry_safe(desc, next, &chan->active_list, node) {
-               list_del(&desc->node);
-               dma_cookie_complete(&desc->async_tx);
-               list_add_tail(&desc->node, &chan->done_list);
-       }
-}
-
-/**
- * xilinx_dma_reset - Reset DMA channel
- * @chan: Driver specific DMA channel
- *
- * Return: '0' on success and failure value on error
- */
-static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
-{
-       int err;
-       u32 tmp;
-
-       dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
-
-       /* Wait for the hardware to finish reset */
-       err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
-                                     !(tmp & XILINX_DMA_DMACR_RESET), 0,
-                                     XILINX_DMA_LOOP_COUNT);
-
-       if (err) {
-               dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
-                       dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
-                       dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
-               return -ETIMEDOUT;
-       }
-
-       chan->err = false;
-
-       return err;
-}
-
-/**
- * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
- * @chan: Driver specific DMA channel
- *
- * Return: '0' on success and failure value on error
- */
-static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
-{
-       int err;
-
-       /* Reset VDMA */
-       err = xilinx_dma_reset(chan);
-       if (err)
-               return err;
-
-       /* Enable interrupts */
-       dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
-                     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
-
-       return 0;
-}
-
-/**
- * xilinx_dma_irq_handler - DMA Interrupt handler
- * @irq: IRQ number
- * @data: Pointer to the Xilinx DMA channel structure
- *
- * Return: IRQ_HANDLED/IRQ_NONE
- */
-static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
-{
-       struct xilinx_dma_chan *chan = data;
-       u32 status;
-
-       /* Read the status and ack the interrupts. */
-       status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
-       if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
-               return IRQ_NONE;
-
-       dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
-                       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
-
-       if (status & XILINX_DMA_DMASR_ERR_IRQ) {
-               /*
-                * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
-                * error is recoverable, ignore it. Otherwise flag the error.
-                *
-                * Only recoverable errors can be cleared in the DMASR register,
-                * so take care not to write 1 to any other error bits.
-                */
-               u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
-
-               dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
-                               errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
-
-               if (!chan->flush_on_fsync ||
-                   (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
-                       dev_err(chan->dev,
-                               "Channel %p has errors %x, cdr %x tdr %x\n",
-                               chan, errors,
-                               dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
-                               dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
-                       chan->err = true;
-               }
-       }
-
-       if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
-               /*
-                * The device is taking too long to complete the transfer
-                * while the user requires responsiveness.
-                */
-               dev_dbg(chan->dev, "Inter-packet latency too long\n");
-       }
-
-       if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
-               spin_lock(&chan->lock);
-               xilinx_dma_complete_descriptor(chan);
-               chan->start_transfer(chan);
-               spin_unlock(&chan->lock);
-       }
-
-       tasklet_schedule(&chan->tasklet);
-       return IRQ_HANDLED;
-}
-
-/**
- * append_desc_queue - Append a descriptor to the pending queue
- * @chan: Driver specific dma channel
- * @desc: dma transaction descriptor
- */
-static void append_desc_queue(struct xilinx_dma_chan *chan,
-                             struct xilinx_dma_tx_descriptor *desc)
-{
-       struct xilinx_vdma_tx_segment *tail_segment;
-       struct xilinx_dma_tx_descriptor *tail_desc;
-       struct xilinx_axidma_tx_segment *axidma_tail_segment;
-       struct xilinx_cdma_tx_segment *cdma_tail_segment;
-
-       if (list_empty(&chan->pending_list))
-               goto append;
-
-       /*
-        * Add the hardware descriptor to the chain of hardware descriptors
-        * that already exists in memory.
-        */
-       tail_desc = list_last_entry(&chan->pending_list,
-                                   struct xilinx_dma_tx_descriptor, node);
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
-               tail_segment = list_last_entry(&tail_desc->segments,
-                                              struct xilinx_vdma_tx_segment,
-                                              node);
-               tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
-       } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
-               cdma_tail_segment = list_last_entry(&tail_desc->segments,
-                                               struct xilinx_cdma_tx_segment,
-                                               node);
-               cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
-       } else {
-               axidma_tail_segment = list_last_entry(&tail_desc->segments,
-                                              struct xilinx_axidma_tx_segment,
-                                              node);
-               axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
-       }
-
-       /*
-        * Add the software descriptor and all children to the list
-        * of pending transactions
-        */
-append:
-       list_add_tail(&desc->node, &chan->pending_list);
-       chan->desc_pendingcount++;
-
-       if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
-           && unlikely(chan->desc_pendingcount > chan->num_frms)) {
-               dev_dbg(chan->dev, "desc pendingcount is too high\n");
-               chan->desc_pendingcount = chan->num_frms;
-       }
-}
-
-/**
- * xilinx_dma_tx_submit - Submit DMA transaction
- * @tx: Async transaction descriptor
- *
- * Return: cookie value on success and failure value on error
- */
-static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-       struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
-       struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
-       dma_cookie_t cookie;
-       unsigned long flags;
-       int err;
-
-       if (chan->err) {
-               /*
-                * If reset fails, the channel is no longer functional and
-                * the system needs a hard reset.
-                */
-               err = xilinx_dma_chan_reset(chan);
-               if (err < 0)
-                       return err;
-       }
-
-       spin_lock_irqsave(&chan->lock, flags);
-
-       cookie = dma_cookie_assign(tx);
-
-       /* Put this transaction onto the tail of the pending queue */
-       append_desc_queue(chan, desc);
-
-       spin_unlock_irqrestore(&chan->lock, flags);
-
-       return cookie;
-}
-
-/**
- * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
- *     DMA_SLAVE transaction
- * @dchan: DMA channel
- * @xt: Interleaved template pointer
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *
-xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
-                                struct dma_interleaved_template *xt,
-                                unsigned long flags)
-{
-       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
-       struct xilinx_dma_tx_descriptor *desc;
-       struct xilinx_vdma_tx_segment *segment, *prev = NULL;
-       struct xilinx_vdma_desc_hw *hw;
-
-       if (!is_slave_direction(xt->dir))
-               return NULL;
-
-       if (!xt->numf || !xt->sgl[0].size)
-               return NULL;
-
-       if (xt->frame_size != 1)
-               return NULL;
-
-       /* Allocate a transaction descriptor. */
-       desc = xilinx_dma_alloc_tx_descriptor(chan);
-       if (!desc)
-               return NULL;
-
-       dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
-       desc->async_tx.tx_submit = xilinx_dma_tx_submit;
-       async_tx_ack(&desc->async_tx);
-
-       /* Allocate the link descriptor from DMA pool */
-       segment = xilinx_vdma_alloc_tx_segment(chan);
-       if (!segment)
-               goto error;
-
-       /* Fill in the hardware descriptor */
-       hw = &segment->hw;
-       hw->vsize = xt->numf;
-       hw->hsize = xt->sgl[0].size;
-       hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
-                       XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
-       hw->stride |= chan->config.frm_dly <<
-                       XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
-
-       if (xt->dir != DMA_MEM_TO_DEV) {
-               if (chan->ext_addr) {
-                       hw->buf_addr = lower_32_bits(xt->dst_start);
-                       hw->buf_addr_msb = upper_32_bits(xt->dst_start);
-               } else {
-                       hw->buf_addr = xt->dst_start;
-               }
-       } else {
-               if (chan->ext_addr) {
-                       hw->buf_addr = lower_32_bits(xt->src_start);
-                       hw->buf_addr_msb = upper_32_bits(xt->src_start);
-               } else {
-                       hw->buf_addr = xt->src_start;
-               }
-       }
-
-       /* Insert the segment into the descriptor segments list. */
-       list_add_tail(&segment->node, &desc->segments);
-
-       prev = segment;
-
-       /* Link the last hardware descriptor with the first. */
-       segment = list_first_entry(&desc->segments,
-                                  struct xilinx_vdma_tx_segment, node);
-       desc->async_tx.phys = segment->phys;
-
-       return &desc->async_tx;
-
-error:
-       xilinx_dma_free_tx_descriptor(chan, desc);
-       return NULL;
-}
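A worked example of the stride packing above, using assumed geometry (640-pixel, 16-bit lines padded out to a 2048-byte stride):

u32 size = 640 * 2;		/* xt->sgl[0].size: bytes per line    */
u32 icg = 2048 - size;		/* xt->sgl[0].icg: padding per line   */
u32 frm_dly = 0;		/* chan->config.frm_dly: no delay     */
u32 stride_reg;

stride_reg  = (icg + size) << XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; /* 2048 */
stride_reg |= frm_dly << XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
/* hw->vsize = xt->numf (e.g. 480) and hw->hsize = size complete the frame. */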
-
-/**
- * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
- * @dchan: DMA channel
- * @dma_dst: destination address
- * @dma_src: source address
- * @len: transfer length
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *
-xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
-                       dma_addr_t dma_src, size_t len, unsigned long flags)
-{
-       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
-       struct xilinx_dma_tx_descriptor *desc;
-       struct xilinx_cdma_tx_segment *segment, *prev;
-       struct xilinx_cdma_desc_hw *hw;
-
-       if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
-               return NULL;
-
-       desc = xilinx_dma_alloc_tx_descriptor(chan);
-       if (!desc)
-               return NULL;
-
-       dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
-       desc->async_tx.tx_submit = xilinx_dma_tx_submit;
-
-       /* Allocate the link descriptor from DMA pool */
-       segment = xilinx_cdma_alloc_tx_segment(chan);
-       if (!segment)
-               goto error;
-
-       hw = &segment->hw;
-       hw->control = len;
-       hw->src_addr = dma_src;
-       hw->dest_addr = dma_dst;
-
-       /* Fill the previous next descriptor with current */
-       prev = list_last_entry(&desc->segments,
-                              struct xilinx_cdma_tx_segment, node);
-       prev->hw.next_desc = segment->phys;
-
-       /* Insert the segment into the descriptor segments list. */
-       list_add_tail(&segment->node, &desc->segments);
-
-       prev = segment;
-
-       /* Link the last hardware descriptor with the first. */
-       segment = list_first_entry(&desc->segments,
-                               struct xilinx_cdma_tx_segment, node);
-       desc->async_tx.phys = segment->phys;
-       prev->hw.next_desc = segment->phys;
-
-       return &desc->async_tx;
-
-error:
-       xilinx_dma_free_tx_descriptor(chan, desc);
-       return NULL;
-}
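A hedged client-side sketch of driving this memcpy path through the generic API; the helper below is hypothetical and only illustrates why larger copies have to be split at the 23-bit length limit enforced above (just under 8 MiB per descriptor):

/* Hypothetical helper (not driver code): copy 'len' bytes, splitting at
 * the BTT limit enforced by xilinx_cdma_prep_memcpy() above. */
static int example_cdma_copy(struct dma_chan *chan, dma_addr_t dst,
			     dma_addr_t src, size_t len)
{
	while (len) {
		size_t chunk = min_t(size_t, len, XILINX_DMA_MAX_TRANS_LEN);
		struct dma_async_tx_descriptor *tx;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, chunk,
					       DMA_CTRL_ACK);
		if (!tx)
			return -ENOMEM;

		dmaengine_submit(tx);
		dst += chunk;
		src += chunk;
		len -= chunk;
	}

	dma_async_issue_pending(chan);
	return 0;
}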
-
-/**
- * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @dchan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @sgl
- * @direction: DMA direction
- * @flags: transfer ack flags
- * @context: APP words of the descriptor
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
-       struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
-       enum dma_transfer_direction direction, unsigned long flags,
-       void *context)
-{
-       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
-       struct xilinx_dma_tx_descriptor *desc;
-       struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
-       u32 *app_w = (u32 *)context;
-       struct scatterlist *sg;
-       size_t copy;
-       size_t sg_used;
-       unsigned int i;
-
-       if (!is_slave_direction(direction))
-               return NULL;
-
-       /* Allocate a transaction descriptor. */
-       desc = xilinx_dma_alloc_tx_descriptor(chan);
-       if (!desc)
-               return NULL;
-
-       dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
-       desc->async_tx.tx_submit = xilinx_dma_tx_submit;
-
-       /* Build transactions using information in the scatter gather list */
-       for_each_sg(sgl, sg, sg_len, i) {
-               sg_used = 0;
-
-               /* Loop until the entire scatterlist entry is used */
-               while (sg_used < sg_dma_len(sg)) {
-                       struct xilinx_axidma_desc_hw *hw;
-
-                       /* Get a free segment */
-                       segment = xilinx_axidma_alloc_tx_segment(chan);
-                       if (!segment)
-                               goto error;
-
-                       /*
-                        * Calculate the maximum number of bytes to transfer,
-                        * making sure it is less than the hw limit
-                        */
-                       copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-                                    XILINX_DMA_MAX_TRANS_LEN);
-                       hw = &segment->hw;
-
-                       /* Fill in the descriptor */
-                       hw->buf_addr = sg_dma_address(sg) + sg_used;
-
-                       hw->control = copy;
-
-                       if (chan->direction == DMA_MEM_TO_DEV) {
-                               if (app_w)
-                                       memcpy(hw->app, app_w, sizeof(u32) *
-                                              XILINX_DMA_NUM_APP_WORDS);
-                       }
-
-                       if (prev)
-                               prev->hw.next_desc = segment->phys;
-
-                       prev = segment;
-                       sg_used += copy;
-
-                       /*
-                        * Insert the segment into the descriptor segments
-                        * list.
-                        */
-                       list_add_tail(&segment->node, &desc->segments);
-               }
-       }
-
-       segment = list_first_entry(&desc->segments,
-                                  struct xilinx_axidma_tx_segment, node);
-       desc->async_tx.phys = segment->phys;
-       prev->hw.next_desc = segment->phys;
-
-       /* For DMA_MEM_TO_DEV, set SOP on the first and EOP on the last segment */
-       if (chan->direction == DMA_MEM_TO_DEV) {
-               segment->hw.control |= XILINX_DMA_BD_SOP;
-               segment = list_last_entry(&desc->segments,
-                                         struct xilinx_axidma_tx_segment,
-                                         node);
-               segment->hw.control |= XILINX_DMA_BD_EOP;
-       }
-
-       return &desc->async_tx;
-
-error:
-       xilinx_dma_free_tx_descriptor(chan, desc);
-       return NULL;
-}
-
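
For reference, a minimal client-side sketch of driving a slave_sg prep callback such as the one above through the generic dmaengine API (<linux/dmaengine.h> assumed); dev, sgl, sg_len, rx_done() and ctx are placeholders and error unwinding is trimmed:

	/* Hypothetical client helper: map a scatterlist and queue it on a
	 * previously requested AXI DMA slave channel. */
	static int queue_rx_sg(struct device *dev, struct dma_chan *chan,
			       struct scatterlist *sgl, unsigned int sg_len,
			       dma_async_tx_callback rx_done, void *ctx)
	{
		struct dma_async_tx_descriptor *desc;
		int nents;

		nents = dma_map_sg(dev, sgl, sg_len, DMA_FROM_DEVICE);
		if (!nents)
			return -EIO;

		desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -ENOMEM;

		desc->callback = rx_done;	/* hypothetical completion handler */
		desc->callback_param = ctx;
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);

		return 0;
	}
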
-/**
- * xilinx_dma_terminate_all - Halt the channel and free descriptors
- * @dchan: Driver specific DMA channel pointer
- */
-static int xilinx_dma_terminate_all(struct dma_chan *dchan)
-{
-       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
-
-       /* Halt the DMA engine */
-       xilinx_dma_halt(chan);
-
-       /* Remove and free all of the descriptors in the lists */
-       xilinx_dma_free_descriptors(chan);
-
-       return 0;
-}
-
-/**
- * xilinx_vdma_channel_set_config - Configure VDMA channel
- * Run-time configuration for AXI VDMA, supports:
- * . halt the channel
- * . configure interrupt coalescing and inter-packet delay threshold
- * . start/stop parking
- * . enable genlock
- *
- * @dchan: DMA channel
- * @cfg: VDMA device configuration pointer
- *
- * Return: '0' on success and failure value on error
- */
-int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
-                                       struct xilinx_vdma_config *cfg)
-{
-       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
-       u32 dmacr;
-
-       if (cfg->reset)
-               return xilinx_dma_chan_reset(chan);
-
-       dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
-
-       chan->config.frm_dly = cfg->frm_dly;
-       chan->config.park = cfg->park;
-
-       /* genlock settings */
-       chan->config.gen_lock = cfg->gen_lock;
-       chan->config.master = cfg->master;
-
-       if (cfg->gen_lock && chan->genlock) {
-               dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
-               dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
-       }
-
-       chan->config.frm_cnt_en = cfg->frm_cnt_en;
-       if (cfg->park)
-               chan->config.park_frm = cfg->park_frm;
-       else
-               chan->config.park_frm = -1;
-
-       chan->config.coalesc = cfg->coalesc;
-       chan->config.delay = cfg->delay;
-
-       if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
-               dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
-               chan->config.coalesc = cfg->coalesc;
-       }
-
-       if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
-               dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
-               chan->config.delay = cfg->delay;
-       }
-
-       /* FSync Source selection */
-       dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
-       dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
-
-       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
-
-       return 0;
-}
-EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
-
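
A brief sketch of how a video client might use the exported helper above at run time, assuming <linux/dma/xilinx_dma.h> and a channel already obtained from this device; only fields that the function actually consumes are set, and dev/vdma_chan are placeholders:

	/* Illustrative only: enable genlock slave mode and interrupt
	 * coalescing on a VDMA channel. */
	static int demo_configure_vdma(struct device *dev, struct dma_chan *vdma_chan)
	{
		struct xilinx_vdma_config cfg = {
			.frm_cnt_en = 1,
			.coalesc = 4,	/* interrupt every fourth frame */
			.gen_lock = 1,
			.master = 0,
		};
		int ret;

		ret = xilinx_vdma_channel_set_config(vdma_chan, &cfg);
		if (ret)
			dev_err(dev, "VDMA channel configuration failed: %d\n", ret);
		return ret;
	}
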
-/* -----------------------------------------------------------------------------
- * Probe and remove
- */
-
-/**
- * xilinx_dma_chan_remove - Per Channel remove function
- * @chan: Driver specific DMA channel
- */
-static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
-{
-       /* Disable all interrupts */
-       dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
-                     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
-
-       if (chan->irq > 0)
-               free_irq(chan->irq, chan);
-
-       tasklet_kill(&chan->tasklet);
-
-       list_del(&chan->common.device_node);
-}
-
-static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
-                           struct clk **tx_clk, struct clk **rx_clk,
-                           struct clk **sg_clk, struct clk **tmp_clk)
-{
-       int err;
-
-       *tmp_clk = NULL;
-
-       *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
-       if (IS_ERR(*axi_clk)) {
-               err = PTR_ERR(*axi_clk);
-               dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
-               return err;
-       }
-
-       *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
-       if (IS_ERR(*tx_clk))
-               *tx_clk = NULL;
-
-       *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
-       if (IS_ERR(*rx_clk))
-               *rx_clk = NULL;
-
-       *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
-       if (IS_ERR(*sg_clk))
-               *sg_clk = NULL;
-
-       err = clk_prepare_enable(*axi_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
-               return err;
-       }
-
-       err = clk_prepare_enable(*tx_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
-               goto err_disable_axiclk;
-       }
-
-       err = clk_prepare_enable(*rx_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
-               goto err_disable_txclk;
-       }
-
-       err = clk_prepare_enable(*sg_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err);
-               goto err_disable_rxclk;
-       }
-
-       return 0;
-
-err_disable_rxclk:
-       clk_disable_unprepare(*rx_clk);
-err_disable_txclk:
-       clk_disable_unprepare(*tx_clk);
-err_disable_axiclk:
-       clk_disable_unprepare(*axi_clk);
-
-       return err;
-}
-
-static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
-                           struct clk **dev_clk, struct clk **tmp_clk,
-                           struct clk **tmp1_clk, struct clk **tmp2_clk)
-{
-       int err;
-
-       *tmp_clk = NULL;
-       *tmp1_clk = NULL;
-       *tmp2_clk = NULL;
-
-       *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
-       if (IS_ERR(*axi_clk)) {
-               err = PTR_ERR(*axi_clk);
-               dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err);
-               return err;
-       }
-
-       *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
-       if (IS_ERR(*dev_clk)) {
-               err = PTR_ERR(*dev_clk);
-               dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err);
-               return err;
-       }
-
-       err = clk_prepare_enable(*axi_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
-               return err;
-       }
-
-       err = clk_prepare_enable(*dev_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err);
-               goto err_disable_axiclk;
-       }
-
-       return 0;
-
-err_disable_axiclk:
-       clk_disable_unprepare(*axi_clk);
-
-       return err;
-}
-
-static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
-                           struct clk **tx_clk, struct clk **txs_clk,
-                           struct clk **rx_clk, struct clk **rxs_clk)
-{
-       int err;
-
-       *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
-       if (IS_ERR(*axi_clk)) {
-               err = PTR_ERR(*axi_clk);
-               dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
-               return err;
-       }
-
-       *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
-       if (IS_ERR(*tx_clk))
-               *tx_clk = NULL;
-
-       *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
-       if (IS_ERR(*txs_clk))
-               *txs_clk = NULL;
-
-       *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
-       if (IS_ERR(*rx_clk))
-               *rx_clk = NULL;
-
-       *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
-       if (IS_ERR(*rxs_clk))
-               *rxs_clk = NULL;
-
-       err = clk_prepare_enable(*axi_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
-               return err;
-       }
-
-       err = clk_prepare_enable(*tx_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
-               goto err_disable_axiclk;
-       }
-
-       err = clk_prepare_enable(*txs_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
-               goto err_disable_txclk;
-       }
-
-       err = clk_prepare_enable(*rx_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
-               goto err_disable_txsclk;
-       }
-
-       err = clk_prepare_enable(*rxs_clk);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
-               goto err_disable_rxclk;
-       }
-
-       return 0;
-
-err_disable_rxclk:
-       clk_disable_unprepare(*rx_clk);
-err_disable_txsclk:
-       clk_disable_unprepare(*txs_clk);
-err_disable_txclk:
-       clk_disable_unprepare(*tx_clk);
-err_disable_axiclk:
-       clk_disable_unprepare(*axi_clk);
-
-       return err;
-}
-
-static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
-{
-       clk_disable_unprepare(xdev->rxs_clk);
-       clk_disable_unprepare(xdev->rx_clk);
-       clk_disable_unprepare(xdev->txs_clk);
-       clk_disable_unprepare(xdev->tx_clk);
-       clk_disable_unprepare(xdev->axi_clk);
-}
-
-/**
- * xilinx_dma_chan_probe - Per Channel Probing
- * It gets channel features from the device tree entry and
- * initializes special channel handling routines
- *
- * @xdev: Driver specific device structure
- * @node: Device node
- *
- * Return: '0' on success and failure value on error
- */
-static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
-                                 struct device_node *node)
-{
-       struct xilinx_dma_chan *chan;
-       bool has_dre = false;
-       u32 value, width;
-       int err;
-
-       /* Allocate and initialize the channel structure */
-       chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
-       if (!chan)
-               return -ENOMEM;
-
-       chan->dev = xdev->dev;
-       chan->xdev = xdev;
-       chan->has_sg = xdev->has_sg;
-       chan->desc_pendingcount = 0x0;
-       chan->ext_addr = xdev->ext_addr;
-
-       spin_lock_init(&chan->lock);
-       INIT_LIST_HEAD(&chan->pending_list);
-       INIT_LIST_HEAD(&chan->done_list);
-       INIT_LIST_HEAD(&chan->active_list);
-
-       /* Retrieve the channel properties from the device tree */
-       has_dre = of_property_read_bool(node, "xlnx,include-dre");
-
-       chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
-
-       err = of_property_read_u32(node, "xlnx,datawidth", &value);
-       if (err) {
-               dev_err(xdev->dev, "missing xlnx,datawidth property\n");
-               return err;
-       }
-       width = value >> 3; /* Convert bits to bytes */
-
-       /* If data width is greater than 8 bytes, DRE is not in hw */
-       if (width > 8)
-               has_dre = false;
-
-       if (!has_dre)
-               xdev->common.copy_align = fls(width - 1);
-
-       if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
-               chan->direction = DMA_MEM_TO_DEV;
-               chan->id = 0;
-
-               chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
-               if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
-                       chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
-
-                       if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
-                           xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
-                               chan->flush_on_fsync = true;
-               }
-       } else if (of_device_is_compatible(node,
-                                           "xlnx,axi-vdma-s2mm-channel")) {
-               chan->direction = DMA_DEV_TO_MEM;
-               chan->id = 1;
-
-               chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
-               if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
-                       chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
-
-                       if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
-                           xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
-                               chan->flush_on_fsync = true;
-               }
-       } else {
-               dev_err(xdev->dev, "Invalid channel compatible node\n");
-               return -EINVAL;
-       }
-
-       /* Request the interrupt */
-       chan->irq = irq_of_parse_and_map(node, 0);
-       err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
-                         "xilinx-dma-controller", chan);
-       if (err) {
-               dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
-               return err;
-       }
-
-       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
-               chan->start_transfer = xilinx_dma_start_transfer;
-       else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
-               chan->start_transfer = xilinx_cdma_start_transfer;
-       else
-               chan->start_transfer = xilinx_vdma_start_transfer;
-
-       /* Initialize the tasklet */
-       tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
-                       (unsigned long)chan);
-
-       /*
-        * Initialize the DMA channel and add it to the DMA engine channels
-        * list.
-        */
-       chan->common.device = &xdev->common;
-
-       list_add_tail(&chan->common.device_node, &xdev->common.channels);
-       xdev->chan[chan->id] = chan;
-
-       /* Reset the channel */
-       err = xilinx_dma_chan_reset(chan);
-       if (err < 0) {
-               dev_err(xdev->dev, "Reset channel failed\n");
-               return err;
-       }
-
-       return 0;
-}
-
-/**
- * of_dma_xilinx_xlate - Translation function
- * @dma_spec: Pointer to DMA specifier as found in the device tree
- * @ofdma: Pointer to DMA controller data
- *
- * Return: DMA channel pointer on success and NULL on error
- */
-static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
-                                               struct of_dma *ofdma)
-{
-       struct xilinx_dma_device *xdev = ofdma->of_dma_data;
-       int chan_id = dma_spec->args[0];
-
-       if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
-               return NULL;
-
-       return dma_get_slave_channel(&xdev->chan[chan_id]->common);
-}
-
-static const struct xilinx_dma_config axidma_config = {
-       .dmatype = XDMA_TYPE_AXIDMA,
-       .clk_init = axidma_clk_init,
-};
-
-static const struct xilinx_dma_config axicdma_config = {
-       .dmatype = XDMA_TYPE_CDMA,
-       .clk_init = axicdma_clk_init,
-};
-
-static const struct xilinx_dma_config axivdma_config = {
-       .dmatype = XDMA_TYPE_VDMA,
-       .clk_init = axivdma_clk_init,
-};
-
-static const struct of_device_id xilinx_dma_of_ids[] = {
-       { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
-       { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
-       { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
-       {}
-};
-MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
-
-/**
- * xilinx_dma_probe - Driver probe function
- * @pdev: Pointer to the platform_device structure
- *
- * Return: '0' on success and failure value on error
- */
-static int xilinx_dma_probe(struct platform_device *pdev)
-{
-       int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
-                       struct clk **, struct clk **, struct clk **)
-                                       = axivdma_clk_init;
-       struct device_node *node = pdev->dev.of_node;
-       struct xilinx_dma_device *xdev;
-       struct device_node *child, *np = pdev->dev.of_node;
-       struct resource *io;
-       u32 num_frames, addr_width;
-       int i, err;
-
-       /* Allocate and initialize the DMA engine structure */
-       xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
-       if (!xdev)
-               return -ENOMEM;
-
-       xdev->dev = &pdev->dev;
-       if (np) {
-               const struct of_device_id *match;
-
-               match = of_match_node(xilinx_dma_of_ids, np);
-               if (match && match->data) {
-                       xdev->dma_config = match->data;
-                       clk_init = xdev->dma_config->clk_init;
-               }
-       }
-
-       err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
-                      &xdev->rx_clk, &xdev->rxs_clk);
-       if (err)
-               return err;
-
-       /* Request and map I/O memory */
-       io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       xdev->regs = devm_ioremap_resource(&pdev->dev, io);
-       if (IS_ERR(xdev->regs))
-               return PTR_ERR(xdev->regs);
-
-       /* Retrieve the DMA engine properties from the device tree */
-       xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
-
-       if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
-               err = of_property_read_u32(node, "xlnx,num-fstores",
-                                          &num_frames);
-               if (err < 0) {
-                       dev_err(xdev->dev,
-                               "missing xlnx,num-fstores property\n");
-                       return err;
-               }
-
-               err = of_property_read_u32(node, "xlnx,flush-fsync",
-                                          &xdev->flush_on_fsync);
-               if (err < 0)
-                       dev_warn(xdev->dev,
-                                "missing xlnx,flush-fsync property\n");
-       }
-
-       err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
-       if (err < 0)
-               dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
-
-       if (addr_width > 32)
-               xdev->ext_addr = true;
-       else
-               xdev->ext_addr = false;
-
-       /* Set the dma mask bits */
-       dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
-
-       /* Initialize the DMA engine */
-       xdev->common.dev = &pdev->dev;
-
-       INIT_LIST_HEAD(&xdev->common.channels);
-       if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
-               dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
-               dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
-       }
-
-       xdev->common.device_alloc_chan_resources =
-                               xilinx_dma_alloc_chan_resources;
-       xdev->common.device_free_chan_resources =
-                               xilinx_dma_free_chan_resources;
-       xdev->common.device_terminate_all = xilinx_dma_terminate_all;
-       xdev->common.device_tx_status = xilinx_dma_tx_status;
-       xdev->common.device_issue_pending = xilinx_dma_issue_pending;
-       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-               xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
-               /* Residue calculation is supported only by AXI DMA */
-               xdev->common.residue_granularity =
-                                         DMA_RESIDUE_GRANULARITY_SEGMENT;
-       } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
-               dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
-               xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
-       } else {
-               xdev->common.device_prep_interleaved_dma =
-                               xilinx_vdma_dma_prep_interleaved;
-       }
-
-       platform_set_drvdata(pdev, xdev);
-
-       /* Initialize the channels */
-       for_each_child_of_node(node, child) {
-               err = xilinx_dma_chan_probe(xdev, child);
-               if (err < 0)
-                       goto disable_clks;
-       }
-
-       if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
-               for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
-                       if (xdev->chan[i])
-                               xdev->chan[i]->num_frms = num_frames;
-       }
-
-       /* Register the DMA engine with the core */
-       dma_async_device_register(&xdev->common);
-
-       err = of_dma_controller_register(node, of_dma_xilinx_xlate,
-                                        xdev);
-       if (err < 0) {
-               dev_err(&pdev->dev, "Unable to register DMA to DT\n");
-               dma_async_device_unregister(&xdev->common);
-               goto error;
-       }
-
-       dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
-
-       return 0;
-
-disable_clks:
-       xdma_disable_allclks(xdev);
-error:
-       for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
-               if (xdev->chan[i])
-                       xilinx_dma_chan_remove(xdev->chan[i]);
-
-       return err;
-}
-
-/**
- * xilinx_dma_remove - Driver remove function
- * @pdev: Pointer to the platform_device structure
- *
- * Return: Always '0'
- */
-static int xilinx_dma_remove(struct platform_device *pdev)
-{
-       struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
-       int i;
-
-       of_dma_controller_free(pdev->dev.of_node);
-
-       dma_async_device_unregister(&xdev->common);
-
-       for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
-               if (xdev->chan[i])
-                       xilinx_dma_chan_remove(xdev->chan[i]);
-
-       xdma_disable_allclks(xdev);
-
-       return 0;
-}
-
-static struct platform_driver xilinx_vdma_driver = {
-       .driver = {
-               .name = "xilinx-vdma",
-               .of_match_table = xilinx_dma_of_ids,
-       },
-       .probe = xilinx_dma_probe,
-       .remove = xilinx_dma_remove,
-};
-
-module_platform_driver(xilinx_vdma_driver);
-
-MODULE_AUTHOR("Xilinx, Inc.");
-MODULE_DESCRIPTION("Xilinx VDMA driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
new file mode 100644 (file)
index 0000000..6d221e5
--- /dev/null
@@ -0,0 +1,1151 @@
+/*
+ * DMA driver for Xilinx ZynqMP DMA Engine
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "../dmaengine.h"
+
+/* Register Offsets */
+#define ZYNQMP_DMA_ISR                 0x100
+#define ZYNQMP_DMA_IMR                 0x104
+#define ZYNQMP_DMA_IER                 0x108
+#define ZYNQMP_DMA_IDS                 0x10C
+#define ZYNQMP_DMA_CTRL0               0x110
+#define ZYNQMP_DMA_CTRL1               0x114
+#define ZYNQMP_DMA_DATA_ATTR           0x120
+#define ZYNQMP_DMA_DSCR_ATTR           0x124
+#define ZYNQMP_DMA_SRC_DSCR_WRD0       0x128
+#define ZYNQMP_DMA_SRC_DSCR_WRD1       0x12C
+#define ZYNQMP_DMA_SRC_DSCR_WRD2       0x130
+#define ZYNQMP_DMA_SRC_DSCR_WRD3       0x134
+#define ZYNQMP_DMA_DST_DSCR_WRD0       0x138
+#define ZYNQMP_DMA_DST_DSCR_WRD1       0x13C
+#define ZYNQMP_DMA_DST_DSCR_WRD2       0x140
+#define ZYNQMP_DMA_DST_DSCR_WRD3       0x144
+#define ZYNQMP_DMA_SRC_START_LSB       0x158
+#define ZYNQMP_DMA_SRC_START_MSB       0x15C
+#define ZYNQMP_DMA_DST_START_LSB       0x160
+#define ZYNQMP_DMA_DST_START_MSB       0x164
+#define ZYNQMP_DMA_RATE_CTRL           0x18C
+#define ZYNQMP_DMA_IRQ_SRC_ACCT                0x190
+#define ZYNQMP_DMA_IRQ_DST_ACCT                0x194
+#define ZYNQMP_DMA_CTRL2               0x200
+
+/* Interrupt registers bit field definitions */
+#define ZYNQMP_DMA_DONE                        BIT(10)
+#define ZYNQMP_DMA_AXI_WR_DATA         BIT(9)
+#define ZYNQMP_DMA_AXI_RD_DATA         BIT(8)
+#define ZYNQMP_DMA_AXI_RD_DST_DSCR     BIT(7)
+#define ZYNQMP_DMA_AXI_RD_SRC_DSCR     BIT(6)
+#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR    BIT(5)
+#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR    BIT(4)
+#define ZYNQMP_DMA_BYTE_CNT_OVRFL      BIT(3)
+#define ZYNQMP_DMA_DST_DSCR_DONE       BIT(2)
+#define ZYNQMP_DMA_INV_APB             BIT(0)
+
+/* Control 0 register bit field definitions */
+#define ZYNQMP_DMA_OVR_FETCH           BIT(7)
+#define ZYNQMP_DMA_POINT_TYPE_SG       BIT(6)
+#define ZYNQMP_DMA_RATE_CTRL_EN                BIT(3)
+
+/* Control 1 register bit field definitions */
+#define ZYNQMP_DMA_SRC_ISSUE           GENMASK(4, 0)
+
+/* Data Attribute register bit field definitions */
+#define ZYNQMP_DMA_ARBURST             GENMASK(27, 26)
+#define ZYNQMP_DMA_ARCACHE             GENMASK(25, 22)
+#define ZYNQMP_DMA_ARCACHE_OFST                22
+#define ZYNQMP_DMA_ARQOS               GENMASK(21, 18)
+#define ZYNQMP_DMA_ARQOS_OFST          18
+#define ZYNQMP_DMA_ARLEN               GENMASK(17, 14)
+#define ZYNQMP_DMA_ARLEN_OFST          14
+#define ZYNQMP_DMA_AWBURST             GENMASK(13, 12)
+#define ZYNQMP_DMA_AWCACHE             GENMASK(11, 8)
+#define ZYNQMP_DMA_AWCACHE_OFST                8
+#define ZYNQMP_DMA_AWQOS               GENMASK(7, 4)
+#define ZYNQMP_DMA_AWQOS_OFST          4
+#define ZYNQMP_DMA_AWLEN               GENMASK(3, 0)
+#define ZYNQMP_DMA_AWLEN_OFST          0
+
+/* Descriptor Attribute register bit field definitions */
+#define ZYNQMP_DMA_AXCOHRNT            BIT(8)
+#define ZYNQMP_DMA_AXCACHE             GENMASK(7, 4)
+#define ZYNQMP_DMA_AXCACHE_OFST                4
+#define ZYNQMP_DMA_AXQOS               GENMASK(3, 0)
+#define ZYNQMP_DMA_AXQOS_OFST          0
+
+/* Control register 2 bit field definitions */
+#define ZYNQMP_DMA_ENABLE              BIT(0)
+
+/* Buffer Descriptor definitions */
+#define ZYNQMP_DMA_DESC_CTRL_STOP      0x10
+#define ZYNQMP_DMA_DESC_CTRL_COMP_INT  0x4
+#define ZYNQMP_DMA_DESC_CTRL_SIZE_256  0x2
+#define ZYNQMP_DMA_DESC_CTRL_COHRNT    0x1
+
+/* Interrupt Mask specific definitions */
+#define ZYNQMP_DMA_INT_ERR     (ZYNQMP_DMA_AXI_RD_DATA | \
+                               ZYNQMP_DMA_AXI_WR_DATA | \
+                               ZYNQMP_DMA_AXI_RD_DST_DSCR | \
+                               ZYNQMP_DMA_AXI_RD_SRC_DSCR | \
+                               ZYNQMP_DMA_INV_APB)
+#define ZYNQMP_DMA_INT_OVRFL   (ZYNQMP_DMA_BYTE_CNT_OVRFL | \
+                               ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \
+                               ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
+#define ZYNQMP_DMA_INT_DONE    (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE)
+#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \
+                                       ZYNQMP_DMA_INT_ERR | \
+                                       ZYNQMP_DMA_INT_OVRFL | \
+                                       ZYNQMP_DMA_DST_DSCR_DONE)
+
+/* Max number of descriptors per channel */
+#define ZYNQMP_DMA_NUM_DESCS   32
+
+/* Max transfer size per descriptor */
+#define ZYNQMP_DMA_MAX_TRANS_LEN       0x40000000
+
+/* Reset values for data attributes */
+#define ZYNQMP_DMA_AXCACHE_VAL         0xF
+#define ZYNQMP_DMA_ARLEN_RST_VAL       0xF
+#define ZYNQMP_DMA_AWLEN_RST_VAL       0xF
+
+#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL   0x1F
+
+#define ZYNQMP_DMA_IDS_DEFAULT_MASK    0xFFF
+
+/* Bus width in bits */
+#define ZYNQMP_DMA_BUS_WIDTH_64                64
+#define ZYNQMP_DMA_BUS_WIDTH_128       128
+
+#define ZYNQMP_DMA_DESC_SIZE(chan)     (chan->desc_size)
+
+#define to_chan(chan)          container_of(chan, struct zynqmp_dma_chan, \
+                                            common)
+#define tx_to_desc(tx)         container_of(tx, struct zynqmp_dma_desc_sw, \
+                                            async_tx)
+
+/**
+ * struct zynqmp_dma_desc_ll - Hw linked list descriptor
+ * @addr: Buffer address
+ * @size: Size of the buffer
+ * @ctrl: Control word
+ * @nxtdscraddr: Next descriptor base address
+ * @rsvd: Reserved field for Hw internal use.
+ */
+struct zynqmp_dma_desc_ll {
+       u64 addr;
+       u32 size;
+       u32 ctrl;
+       u64 nxtdscraddr;
+       u64 rsvd;
+} __aligned(64);
+
+/**
+ * struct zynqmp_dma_desc_sw - Per Transaction structure
+ * @src: Source address for simple mode dma
+ * @dst: Destination address for simple mode dma
+ * @len: Transfer length for simple mode dma
+ * @node: Node in the channel descriptor list
+ * @tx_list: List head for the current transfer
+ * @async_tx: Async transaction descriptor
+ * @src_v: Virtual address of the src descriptor
+ * @src_p: Physical address of the src descriptor
+ * @dst_v: Virtual address of the dst descriptor
+ * @dst_p: Physical address of the dst descriptor
+ */
+struct zynqmp_dma_desc_sw {
+       u64 src;
+       u64 dst;
+       u32 len;
+       struct list_head node;
+       struct list_head tx_list;
+       struct dma_async_tx_descriptor async_tx;
+       struct zynqmp_dma_desc_ll *src_v;
+       dma_addr_t src_p;
+       struct zynqmp_dma_desc_ll *dst_v;
+       dma_addr_t dst_p;
+};
+
+/**
+ * struct zynqmp_dma_chan - Driver specific DMA channel structure
+ * @zdev: Driver specific device structure
+ * @regs: Control registers offset
+ * @lock: Descriptor operation lock
+ * @pending_list: Descriptors waiting
+ * @free_list: Descriptors free
+ * @active_list: Descriptors active
+ * @sw_desc_pool: SW descriptor pool
+ * @done_list: Complete descriptors
+ * @common: DMA common channel
+ * @desc_pool_v: Virtual address of the descriptor pool
+ * @desc_pool_p: DMA address of the descriptor pool
+ * @desc_free_cnt: Descriptor available count
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @is_dmacoherent: Tells whether dma operations are coherent or not
+ * @tasklet: Cleanup work after irq
+ * @idle: Channel status
+ * @desc_size: Size of the low level descriptor
+ * @err: Channel has errors
+ * @bus_width: Bus width
+ * @src_burst_len: Source burst length
+ * @dst_burst_len: Dest burst length
+ * @clk_main: Pointer to main clock
+ * @clk_apb: Pointer to apb clock
+ */
+struct zynqmp_dma_chan {
+       struct zynqmp_dma_device *zdev;
+       void __iomem *regs;
+       spinlock_t lock;
+       struct list_head pending_list;
+       struct list_head free_list;
+       struct list_head active_list;
+       struct zynqmp_dma_desc_sw *sw_desc_pool;
+       struct list_head done_list;
+       struct dma_chan common;
+       void *desc_pool_v;
+       dma_addr_t desc_pool_p;
+       u32 desc_free_cnt;
+       struct device *dev;
+       int irq;
+       bool is_dmacoherent;
+       struct tasklet_struct tasklet;
+       bool idle;
+       u32 desc_size;
+       bool err;
+       u32 bus_width;
+       u32 src_burst_len;
+       u32 dst_burst_len;
+       struct clk *clk_main;
+       struct clk *clk_apb;
+};
+
+/**
+ * struct zynqmp_dma_device - DMA device structure
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific DMA channel
+ */
+struct zynqmp_dma_device {
+       struct device *dev;
+       struct dma_device common;
+       struct zynqmp_dma_chan *chan;
+};
+
+static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
+                                    u64 value)
+{
+       lo_hi_writeq(value, chan->regs + reg);
+}
+
+/**
+ * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller
+ * @chan: ZynqMP DMA channel pointer
+ * @desc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan,
+                                     struct zynqmp_dma_desc_sw *desc)
+{
+       dma_addr_t addr;
+
+       addr = desc->src_p;
+       zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr);
+       addr = desc->dst_p;
+       zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr);
+}
+
+/**
+ * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @desc: Hw descriptor pointer
+ */
+static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan,
+                                      void *desc)
+{
+       struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc;
+
+       hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP;
+       hw++;
+       hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP;
+}
+
+/**
+ * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Hw descriptor pointer
+ * @src: Source buffer address
+ * @dst: Destination buffer address
+ * @len: Transfer length
+ * @prev: Previous hw descriptor pointer
+ */
+static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan,
+                                  struct zynqmp_dma_desc_ll *sdesc,
+                                  dma_addr_t src, dma_addr_t dst, size_t len,
+                                  struct zynqmp_dma_desc_ll *prev)
+{
+       struct zynqmp_dma_desc_ll *ddesc = sdesc + 1;
+
+       sdesc->size = ddesc->size = len;
+       sdesc->addr = src;
+       ddesc->addr = dst;
+
+       sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256;
+       if (chan->is_dmacoherent) {
+               sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
+               ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
+       }
+
+       if (prev) {
+               dma_addr_t addr = chan->desc_pool_p +
+                           ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v);
+               ddesc = prev + 1;
+               prev->nxtdscraddr = addr;
+               ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan);
+       }
+}
+
+/**
+ * zynqmp_dma_init - Initialize the channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
+{
+       u32 val;
+
+       writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+       val = readl(chan->regs + ZYNQMP_DMA_ISR);
+       writel(val, chan->regs + ZYNQMP_DMA_ISR);
+
+       if (chan->is_dmacoherent) {
+               val = ZYNQMP_DMA_AXCOHRNT;
+               val = (val & ~ZYNQMP_DMA_AXCACHE) |
+                       (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST);
+               writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR);
+       }
+
+       val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+       if (chan->is_dmacoherent) {
+               val = (val & ~ZYNQMP_DMA_ARCACHE) |
+                       (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST);
+               val = (val & ~ZYNQMP_DMA_AWCACHE) |
+                       (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
+       }
+       writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+
+       /* Clear the interrupt accounting registers */
+       val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+       val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+
+       chan->idle = true;
+}
+
+/**
+ * zynqmp_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor pointer
+ *
+ * Return: cookie value
+ */
+static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct zynqmp_dma_chan *chan = to_chan(tx->chan);
+       struct zynqmp_dma_desc_sw *desc, *new;
+       dma_cookie_t cookie;
+
+       new = tx_to_desc(tx);
+       spin_lock_bh(&chan->lock);
+       cookie = dma_cookie_assign(tx);
+
+       if (!list_empty(&chan->pending_list)) {
+               desc = list_last_entry(&chan->pending_list,
+                                    struct zynqmp_dma_desc_sw, node);
+               if (!list_empty(&desc->tx_list))
+                       desc = list_last_entry(&desc->tx_list,
+                                              struct zynqmp_dma_desc_sw, node);
+               desc->src_v->nxtdscraddr = new->src_p;
+               desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+               desc->dst_v->nxtdscraddr = new->dst_p;
+               desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+       }
+
+       list_add_tail(&new->node, &chan->pending_list);
+       spin_unlock_bh(&chan->lock);
+
+       return cookie;
+}
+
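
As a reminder of where the tx_submit hook above sits, a minimal client-side sketch of the submit path; 'desc' would come from one of the prep callbacks registered by this driver, and chan is a placeholder:

	/* Generic dmaengine submit sequence as seen from a client. */
	static int demo_submit(struct dma_chan *chan,
			       struct dma_async_tx_descriptor *desc)
	{
		dma_cookie_t cookie;

		cookie = dmaengine_submit(desc);	/* lands in zynqmp_dma_tx_submit() */
		if (dma_submit_error(cookie))
			return -EIO;

		dma_async_issue_pending(chan);		/* kicks zynqmp_dma_issue_pending() */
		return 0;
	}
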
+/**
+ * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
+ * @chan: ZynqMP DMA channel pointer
+ *
+ * Return: The sw descriptor
+ */
+static struct zynqmp_dma_desc_sw *
+zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
+{
+       struct zynqmp_dma_desc_sw *desc;
+
+       spin_lock_bh(&chan->lock);
+       desc = list_first_entry(&chan->free_list,
+                               struct zynqmp_dma_desc_sw, node);
+       list_del(&desc->node);
+       spin_unlock_bh(&chan->lock);
+
+       INIT_LIST_HEAD(&desc->tx_list);
+       /* Clear the src and dst descriptor memory */
+       memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+       memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+
+       return desc;
+}
+
+/**
+ * zynqmp_dma_free_descriptor - Free a transaction descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
+                                struct zynqmp_dma_desc_sw *sdesc)
+{
+       struct zynqmp_dma_desc_sw *child, *next;
+
+       chan->desc_free_cnt++;
+       list_add_tail(&sdesc->node, &chan->free_list);
+       list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
+               chan->desc_free_cnt++;
+               list_move_tail(&child->node, &chan->free_list);
+       }
+}
+
+/**
+ * zynqmp_dma_free_desc_list - Free descriptors list
+ * @chan: ZynqMP DMA channel pointer
+ * @list: List to parse and delete the descriptor
+ */
+static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
+                                     struct list_head *list)
+{
+       struct zynqmp_dma_desc_sw *desc, *next;
+
+       list_for_each_entry_safe(desc, next, list, node)
+               zynqmp_dma_free_descriptor(chan, desc);
+}
+
+/**
+ * zynqmp_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: Number of descriptors on success and failure value on error
+ */
+static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+       struct zynqmp_dma_chan *chan = to_chan(dchan);
+       struct zynqmp_dma_desc_sw *desc;
+       int i;
+
+       chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
+                                    GFP_KERNEL);
+       if (!chan->sw_desc_pool)
+               return -ENOMEM;
+
+       chan->idle = true;
+       chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS;
+
+       INIT_LIST_HEAD(&chan->free_list);
+
+       for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+               desc = chan->sw_desc_pool + i;
+               dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+               desc->async_tx.tx_submit = zynqmp_dma_tx_submit;
+               list_add_tail(&desc->node, &chan->free_list);
+       }
+
+       chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
+                               (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+                               &chan->desc_pool_p, GFP_KERNEL);
+       if (!chan->desc_pool_v)
+               return -ENOMEM;
+
+       for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+               desc = chan->sw_desc_pool + i;
+               desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v +
+                                       (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2));
+               desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1);
+               desc->src_p = chan->desc_pool_p +
+                               (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2);
+               desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan);
+       }
+
+       return ZYNQMP_DMA_NUM_DESCS;
+}
+
+/**
+ * zynqmp_dma_start - Start DMA channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
+{
+       writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
+       chan->idle = false;
+       writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
+}
+
+/**
+ * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt
+ * @chan: ZynqMP DMA channel pointer
+ * @status: Interrupt status value
+ */
+static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
+{
+       u32 val;
+
+       if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
+               val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+       if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
+               val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+}
+
+static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
+{
+       u32 val;
+
+       val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
+       val |= ZYNQMP_DMA_POINT_TYPE_SG;
+       writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
+
+       val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+       val = (val & ~ZYNQMP_DMA_ARLEN) |
+               (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+       val = (val & ~ZYNQMP_DMA_AWLEN) |
+               (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+       writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+}
+
+/**
+ * zynqmp_dma_device_config - ZynqMP DMA device configuration
+ * @dchan: DMA channel
+ * @config: DMA device config
+ */
+static int zynqmp_dma_device_config(struct dma_chan *dchan,
+                                   struct dma_slave_config *config)
+{
+       struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+       chan->src_burst_len = config->src_maxburst;
+       chan->dst_burst_len = config->dst_maxburst;
+
+       return 0;
+}
+
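
A short client-side sketch of how the burst lengths above would be set through the standard slave-config path; only src_maxburst/dst_maxburst are consumed by zynqmp_dma_device_config(), so the rest of dma_slave_config is left at defaults, and chan is a placeholder:

	/* Illustrative: cap read/write bursts at 8 beats on a ZynqMP DMA channel. */
	static int demo_set_bursts(struct dma_chan *chan)
	{
		struct dma_slave_config cfg = {
			.src_maxburst = 8,
			.dst_maxburst = 8,
		};

		return dmaengine_slave_config(chan, &cfg);
	}
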
+/**
+ * zynqmp_dma_start_transfer - Initiate the new transfer
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
+{
+       struct zynqmp_dma_desc_sw *desc;
+
+       if (!chan->idle)
+               return;
+
+       zynqmp_dma_config(chan);
+
+       desc = list_first_entry_or_null(&chan->pending_list,
+                                       struct zynqmp_dma_desc_sw, node);
+       if (!desc)
+               return;
+
+       list_splice_tail_init(&chan->pending_list, &chan->active_list);
+       zynqmp_dma_update_desc_to_ctrlr(chan, desc);
+       zynqmp_dma_start(chan);
+}
+
+
+/**
+ * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors
+ * @chan: ZynqMP DMA channel
+ */
+static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
+{
+       struct zynqmp_dma_desc_sw *desc, *next;
+
+       list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+               dma_async_tx_callback callback;
+               void *callback_param;
+
+               list_del(&desc->node);
+
+               callback = desc->async_tx.callback;
+               callback_param = desc->async_tx.callback_param;
+               if (callback) {
+                       spin_unlock(&chan->lock);
+                       callback(callback_param);
+                       spin_lock(&chan->lock);
+               }
+
+               /* Run any dependencies, then free the descriptor */
+               zynqmp_dma_free_descriptor(chan, desc);
+       }
+}
+
+/**
+ * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
+{
+       struct zynqmp_dma_desc_sw *desc;
+
+       desc = list_first_entry_or_null(&chan->active_list,
+                                       struct zynqmp_dma_desc_sw, node);
+       if (!desc)
+               return;
+       list_del(&desc->node);
+       dma_cookie_complete(&desc->async_tx);
+       list_add_tail(&desc->node, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
+{
+       struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+       spin_lock_bh(&chan->lock);
+       zynqmp_dma_start_transfer(chan);
+       spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * zynqmp_dma_free_descriptors - Free channel descriptors
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
+{
+       zynqmp_dma_free_desc_list(chan, &chan->active_list);
+       zynqmp_dma_free_desc_list(chan, &chan->pending_list);
+       zynqmp_dma_free_desc_list(chan, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
+{
+       struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+       spin_lock_bh(&chan->lock);
+       zynqmp_dma_free_descriptors(chan);
+       spin_unlock_bh(&chan->lock);
+       dma_free_coherent(chan->dev,
+               (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
+               chan->desc_pool_v, chan->desc_pool_p);
+       kfree(chan->sw_desc_pool);
+}
+
+/**
+ * zynqmp_dma_reset - Reset the channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
+{
+       writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+
+       zynqmp_dma_complete_descriptor(chan);
+       zynqmp_dma_chan_desc_cleanup(chan);
+       zynqmp_dma_free_descriptors(chan);
+       zynqmp_dma_init(chan);
+}
+
+/**
+ * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the ZynqMP DMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
+{
+       struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+       u32 isr, imr, status;
+       irqreturn_t ret = IRQ_NONE;
+
+       isr = readl(chan->regs + ZYNQMP_DMA_ISR);
+       imr = readl(chan->regs + ZYNQMP_DMA_IMR);
+       status = isr & ~imr;
+
+       writel(isr, chan->regs + ZYNQMP_DMA_ISR);
+       if (status & ZYNQMP_DMA_INT_DONE) {
+               tasklet_schedule(&chan->tasklet);
+               ret = IRQ_HANDLED;
+       }
+
+       if (status & ZYNQMP_DMA_DONE)
+               chan->idle = true;
+
+       if (status & ZYNQMP_DMA_INT_ERR) {
+               chan->err = true;
+               tasklet_schedule(&chan->tasklet);
+               dev_err(chan->dev, "Channel %p has errors\n", chan);
+               ret = IRQ_HANDLED;
+       }
+
+       if (status & ZYNQMP_DMA_INT_OVRFL) {
+               zynqmp_dma_handle_ovfl_int(chan, status);
+               dev_info(chan->dev, "Channel %p overflow interrupt\n", chan);
+               ret = IRQ_HANDLED;
+       }
+
+       return ret;
+}
+
+/**
+ * zynqmp_dma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the ZynqMP DMA channel structure
+ */
+static void zynqmp_dma_do_tasklet(unsigned long data)
+{
+       struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+       u32 count;
+
+       spin_lock(&chan->lock);
+
+       if (chan->err) {
+               zynqmp_dma_reset(chan);
+               chan->err = false;
+               goto unlock;
+       }
+
+       count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+
+       while (count) {
+               zynqmp_dma_complete_descriptor(chan);
+               zynqmp_dma_chan_desc_cleanup(chan);
+               count--;
+       }
+
+       if (chan->idle)
+               zynqmp_dma_start_transfer(chan);
+
+unlock:
+       spin_unlock(&chan->lock);
+}
+
+/**
+ * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel
+ * @dchan: DMA channel pointer
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
+{
+       struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+       spin_lock_bh(&chan->lock);
+       writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+       zynqmp_dma_free_descriptors(chan);
+       spin_unlock_bh(&chan->lock);
+
+       return 0;
+}
+
+/**
+ * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: Destination buffer address
+ * @dma_src: Source buffer address
+ * @len: Transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
+                               struct dma_chan *dchan, dma_addr_t dma_dst,
+                               dma_addr_t dma_src, size_t len, ulong flags)
+{
+       struct zynqmp_dma_chan *chan;
+       struct zynqmp_dma_desc_sw *new, *first = NULL;
+       void *desc = NULL, *prev = NULL;
+       size_t copy;
+       u32 desc_cnt;
+
+       chan = to_chan(dchan);
+
+       if (len > ZYNQMP_DMA_MAX_TRANS_LEN)
+               return NULL;
+
+       desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
+
+       spin_lock_bh(&chan->lock);
+       if (desc_cnt > chan->desc_free_cnt) {
+               spin_unlock_bh(&chan->lock);
+               dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+               return NULL;
+       }
+       chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+       spin_unlock_bh(&chan->lock);
+
+       do {
+               /* Allocate and populate the descriptor */
+               new = zynqmp_dma_get_descriptor(chan);
+
+               copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+               desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+               zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
+                                            dma_dst, copy, prev);
+               prev = desc;
+               len -= copy;
+               dma_src += copy;
+               dma_dst += copy;
+               if (!first)
+                       first = new;
+               else
+                       list_add_tail(&new->node, &first->tx_list);
+       } while (len);
+
+       zynqmp_dma_desc_config_eod(chan, desc);
+       async_tx_ack(&first->async_tx);
+       first->async_tx.flags = flags;
+       return &first->async_tx;
+}
+
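
For completeness, a hedged end-to-end sketch of a memcpy client against a channel advertising DMA_MEMCPY, as this device does; buffer mapping and teardown are omitted and dst, src and len are placeholders:

	/* Illustrative memcpy offload: grab any DMA_MEMCPY-capable channel,
	 * queue one copy and start the engine. */
	static int demo_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;
		struct dma_async_tx_descriptor *tx;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;
	}
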
+/**
+ * zynqmp_dma_prep_sg - prepare descriptors for a memory-to-memory sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
+                       struct dma_chan *dchan, struct scatterlist *dst_sg,
+                       unsigned int dst_sg_len, struct scatterlist *src_sg,
+                       unsigned int src_sg_len, unsigned long flags)
+{
+       struct zynqmp_dma_desc_sw *new, *first = NULL;
+       struct zynqmp_dma_chan *chan = to_chan(dchan);
+       void *desc = NULL, *prev = NULL;
+       size_t len, dst_avail, src_avail;
+       dma_addr_t dma_dst, dma_src;
+       u32 desc_cnt = 0, i;
+       struct scatterlist *sg;
+
+       for_each_sg(src_sg, sg, src_sg_len, i)
+               desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
+                                        ZYNQMP_DMA_MAX_TRANS_LEN);
+
+       spin_lock_bh(&chan->lock);
+       if (desc_cnt > chan->desc_free_cnt) {
+               spin_unlock_bh(&chan->lock);
+               dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+               return NULL;
+       }
+       chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+       spin_unlock_bh(&chan->lock);
+
+       dst_avail = sg_dma_len(dst_sg);
+       src_avail = sg_dma_len(src_sg);
+
+       /* Run until we are out of scatterlist entries */
+       while (true) {
+               /* Allocate and populate the descriptor */
+               new = zynqmp_dma_get_descriptor(chan);
+               desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+               len = min_t(size_t, src_avail, dst_avail);
+               len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+               if (len == 0)
+                       goto fetch;
+               dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+                       dst_avail;
+               dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+                       src_avail;
+
+               zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
+                                            len, prev);
+               prev = desc;
+               dst_avail -= len;
+               src_avail -= len;
+
+               if (!first)
+                       first = new;
+               else
+                       list_add_tail(&new->node, &first->tx_list);
+fetch:
+               /* Fetch the next dst scatterlist entry */
+               if (dst_avail == 0) {
+                       if (dst_sg_len == 0)
+                               break;
+                       dst_sg = sg_next(dst_sg);
+                       if (dst_sg == NULL)
+                               break;
+                       dst_sg_len--;
+                       dst_avail = sg_dma_len(dst_sg);
+               }
+               /* Fetch the next src scatterlist entry */
+               if (src_avail == 0) {
+                       if (src_sg_len == 0)
+                               break;
+                       src_sg = sg_next(src_sg);
+                       if (src_sg == NULL)
+                               break;
+                       src_sg_len--;
+                       src_avail = sg_dma_len(src_sg);
+               }
+       }
+
+       zynqmp_dma_desc_config_eod(chan, desc);
+       first->async_tx.flags = flags;
+       return &first->async_tx;
+}
+
+/**
+ * zynqmp_dma_chan_remove - Channel remove function
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
+{
+       if (!chan)
+               return;
+
+       devm_free_irq(chan->zdev->dev, chan->irq, chan);
+       tasklet_kill(&chan->tasklet);
+       list_del(&chan->common.device_node);
+       clk_disable_unprepare(chan->clk_apb);
+       clk_disable_unprepare(chan->clk_main);
+}
+
+/**
+ * zynqmp_dma_chan_probe - Per Channel Probing
+ * @zdev: Driver specific device structure
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
+                          struct platform_device *pdev)
+{
+       struct zynqmp_dma_chan *chan;
+       struct resource *res;
+       struct device_node *node = pdev->dev.of_node;
+       int err;
+
+       chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
+       if (!chan)
+               return -ENOMEM;
+       chan->dev = zdev->dev;
+       chan->zdev = zdev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       chan->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(chan->regs))
+               return PTR_ERR(chan->regs);
+
+       chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
+       chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
+       chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+       err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
+       if (err < 0) {
+               dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
+               return err;
+       }
+
+       if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 &&
+           chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) {
+               dev_err(zdev->dev, "invalid bus-width value\n");
+               return -EINVAL;
+       }
+
+       chan->is_dmacoherent =  of_property_read_bool(node, "dma-coherent");
+       zdev->chan = chan;
+       tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan);
+       spin_lock_init(&chan->lock);
+       INIT_LIST_HEAD(&chan->active_list);
+       INIT_LIST_HEAD(&chan->pending_list);
+       INIT_LIST_HEAD(&chan->done_list);
+       INIT_LIST_HEAD(&chan->free_list);
+
+       dma_cookie_init(&chan->common);
+       chan->common.device = &zdev->common;
+       list_add_tail(&chan->common.device_node, &zdev->common.channels);
+
+       zynqmp_dma_init(chan);
+       chan->irq = platform_get_irq(pdev, 0);
+       if (chan->irq < 0)
+               return -ENXIO;
+       err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0,
+                              "zynqmp-dma", chan);
+       if (err)
+               return err;
+       chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+       if (IS_ERR(chan->clk_main)) {
+               dev_err(&pdev->dev, "main clock not found.\n");
+               return PTR_ERR(chan->clk_main);
+       }
+
+       chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+       if (IS_ERR(chan->clk_apb)) {
+               dev_err(&pdev->dev, "apb clock not found.\n");
+               return PTR_ERR(chan->clk_apb);
+       }
+
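+       /* Enable both clocks; a failure on the apb clock unwinds the main clock below */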
+       err = clk_prepare_enable(chan->clk_main);
+       if (err) {
+               dev_err(&pdev->dev, "Unable to enable main clock.\n");
+               return err;
+       }
+
+       err = clk_prepare_enable(chan->clk_apb);
+       if (err) {
+               clk_disable_unprepare(chan->clk_main);
+               dev_err(&pdev->dev, "Unable to enable apb clock.\n");
+               return err;
+       }
+
+       chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
+       chan->idle = true;
+       return 0;
+}
+
+/**
+ * of_zynqmp_dma_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
+                                           struct of_dma *ofdma)
+{
+       struct zynqmp_dma_device *zdev = ofdma->of_dma_data;
+
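+       /* The controller exposes a single channel, so no translation of the specifier is needed */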
+       return dma_get_slave_channel(&zdev->chan->common);
+}
+
+/**
+ * zynqmp_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int zynqmp_dma_probe(struct platform_device *pdev)
+{
+       struct zynqmp_dma_device *zdev;
+       struct dma_device *p;
+       int ret;
+
+       zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL);
+       if (!zdev)
+               return -ENOMEM;
+
+       zdev->dev = &pdev->dev;
+       INIT_LIST_HEAD(&zdev->common.channels);
+
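+       /* Advertise 44-bit DMA addressing to the DMA mapping core */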
+       dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+       dma_cap_set(DMA_SG, zdev->common.cap_mask);
+       dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
+
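+       /* Hook up the dmaengine callbacks implemented by this driver */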
+       p = &zdev->common;
+       p->device_prep_dma_sg = zynqmp_dma_prep_sg;
+       p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
+       p->device_terminate_all = zynqmp_dma_device_terminate_all;
+       p->device_issue_pending = zynqmp_dma_issue_pending;
+       p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
+       p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
+       p->device_tx_status = dma_cookie_status;
+       p->device_config = zynqmp_dma_device_config;
+       p->dev = &pdev->dev;
+
+       platform_set_drvdata(pdev, zdev);
+
+       ret = zynqmp_dma_chan_probe(zdev, pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "Probing channel failed\n");
+               goto free_chan_resources;
+       }
+
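+       /* A 64- or 128-bit bus width maps to an 8- or 16-byte address width */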
+       p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
+       p->src_addr_widths = BIT(zdev->chan->bus_width / 8);
+
+       ret = dma_async_device_register(&zdev->common);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register the DMA device\n");
+               goto free_chan_resources;
+       }
+
+       ret = of_dma_controller_register(pdev->dev.of_node,
+                                        of_zynqmp_dma_xlate, zdev);
+       if (ret) {
+               dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+               dma_async_device_unregister(&zdev->common);
+               goto free_chan_resources;
+       }
+
+       dev_info(&pdev->dev, "ZynqMP DMA driver probed successfully\n");
+
+       return 0;
+
+free_chan_resources:
+       zynqmp_dma_chan_remove(zdev->chan);
+       return ret;
+}
+
+/**
+ * zynqmp_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_remove(struct platform_device *pdev)
+{
+       struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);
+
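+       /* Detach from the DT and the dmaengine core before tearing the channel down */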
+       of_dma_controller_free(pdev->dev.of_node);
+       dma_async_device_unregister(&zdev->common);
+
+       zynqmp_dma_chan_remove(zdev->chan);
+
+       return 0;
+}
+
+static const struct of_device_id zynqmp_dma_of_match[] = {
+       { .compatible = "xlnx,zynqmp-dma-1.0", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match);
+
+static struct platform_driver zynqmp_dma_driver = {
+       .driver = {
+               .name = "xilinx-zynqmp-dma",
+               .of_match_table = zynqmp_dma_of_match,
+       },
+       .probe = zynqmp_dma_probe,
+       .remove = zynqmp_dma_remove,
+};
+
+module_platform_driver(zynqmp_dma_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");