Merge tag 'dmaengine-5.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 8a3f1043917b248a38536a02ef6e4125f85e064c..a0ee404b736ed966098c94e170fd62a71bfb6a5e 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -2,7 +2,7 @@
 /*
  * Driver for the Analog Devices AXI-DMAC core
  *
- * Copyright 2013-2015 Analog Devices Inc.
+ * Copyright 2013-2019 Analog Devices Inc.
  *  Author: Lars-Peter Clausen <lars@metafoo.de>
  */
 
@@ -18,7 +18,9 @@
 #include <linux/of.h>
 #include <linux/of_dma.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/fpga/adi-axi-common.h>
 
 #include <dt-bindings/dma/axi-dmac.h>
 
@@ -62,6 +64,8 @@
 #define AXI_DMAC_REG_STATUS            0x430
 #define AXI_DMAC_REG_CURRENT_SRC_ADDR  0x434
 #define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
+#define AXI_DMAC_REG_PARTIAL_XFER_LEN  0x44c
+#define AXI_DMAC_REG_PARTIAL_XFER_ID   0x450
 
 #define AXI_DMAC_CTRL_ENABLE           BIT(0)
 #define AXI_DMAC_CTRL_PAUSE            BIT(1)
@@ -70,6 +74,10 @@
 #define AXI_DMAC_IRQ_EOT               BIT(1)
 
 #define AXI_DMAC_FLAG_CYCLIC           BIT(0)
+#define AXI_DMAC_FLAG_LAST             BIT(1)
+#define AXI_DMAC_FLAG_PARTIAL_REPORT   BIT(2)
+
+#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)
 
 /* The maximum ID allocated by the hardware is 31 */
 #define AXI_DMAC_SG_UNUSED 32U
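
A note on the new bits: AXI_DMAC_FLAG_LAST and AXI_DMAC_FLAG_PARTIAL_REPORT
are written into AXI_DMAC_REG_FLAGS when a segment is started, while
AXI_DMAC_FLAG_PARTIAL_XFER_DONE is read back from AXI_DMAC_REG_TRANSFER_DONE,
whose low bits carry one completion bit per transfer ID. A minimal decoding
sketch (the helper is hypothetical, not part of this patch):

        /* Hypothetical decode of AXI_DMAC_REG_TRANSFER_DONE. */
        static void decode_transfer_done(u32 val)
        {
                /* Bits 0..30: one bit per completed transfer ID. */
                u32 completed = val & ~AXI_DMAC_FLAG_PARTIAL_XFER_DONE;
                /* Bit 31: partial-transfer records are waiting to be read. */
                bool partial = val & AXI_DMAC_FLAG_PARTIAL_XFER_DONE;

                pr_debug("done ids=%#x, partial pending=%d\n", completed, partial);
        }
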
@@ -82,12 +90,14 @@ struct axi_dmac_sg {
        unsigned int dest_stride;
        unsigned int src_stride;
        unsigned int id;
+       unsigned int partial_len;
        bool schedule_when_free;
 };
 
 struct axi_dmac_desc {
        struct virt_dma_desc vdesc;
        bool cyclic;
+       bool have_partial_xfer;
 
        unsigned int num_submitted;
        unsigned int num_completed;
@@ -108,8 +118,10 @@ struct axi_dmac_chan {
        unsigned int dest_type;
 
        unsigned int max_length;
-       unsigned int align_mask;
+       unsigned int address_align_mask;
+       unsigned int length_align_mask;
 
+       bool hw_partial_xfer;
        bool hw_cyclic;
        bool hw_2d;
 };
@@ -167,14 +179,14 @@ static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
 {
        if (len == 0)
                return false;
-       if ((len & chan->align_mask) != 0) /* Not aligned */
+       if ((len & chan->length_align_mask) != 0) /* Not aligned */
                return false;
        return true;
 }
 
 static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
 {
-       if ((addr & chan->align_mask) != 0) /* Not aligned */
+       if ((addr & chan->address_align_mask) != 0) /* Not aligned */
                return false;
        return true;
 }
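
The old align_mask is split in two because address alignment (fixed by the
bus widths parsed from the devicetree) and length alignment (probed from the
core in axi_dmac_detect_caps() below) can differ on newer cores. An
illustration with assumed widths, not values taken from this patch:

        /* e.g. 64-bit dest, 16-bit src: addresses need 8-byte alignment */
        unsigned int address_align_mask = 8 - 1;        /* 0x7 */
        /* e.g. the core only requires 2-byte length granularity */
        unsigned int length_align_mask = 2 - 1;         /* 0x1 */

        /* 0x1004 & 0x7 != 0 -> rejected by axi_dmac_check_addr() */
        /* 0x1002 & 0x1 == 0 -> accepted by axi_dmac_check_len() */
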
@@ -210,11 +222,13 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
        }
 
        desc->num_submitted++;
-       if (desc->num_submitted == desc->num_sgs) {
+       if (desc->num_submitted == desc->num_sgs ||
+           desc->have_partial_xfer) {
                if (desc->cyclic)
                        desc->num_submitted = 0; /* Start again */
                else
                        chan->next_desc = NULL;
+               flags |= AXI_DMAC_FLAG_LAST;
        } else {
                chan->next_desc = desc;
        }
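
Two behaviours are folded into this hunk: the final segment of a transfer is
tagged with AXI_DMAC_FLAG_LAST so the core knows where the scatter list ends,
and a descriptor that has already seen a partial transfer stops submitting
further segments. Illustrative flag values for a one-shot transfer on a core
with partial-transfer support:

        /* intermediate segments */
        flags = AXI_DMAC_FLAG_PARTIAL_REPORT;
        /* final segment, or first segment after a partial transfer */
        flags |= AXI_DMAC_FLAG_LAST;
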
@@ -240,6 +254,9 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
                desc->num_sgs == 1)
                flags |= AXI_DMAC_FLAG_CYCLIC;
 
+       if (chan->hw_partial_xfer)
+               flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
+
        axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
        axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
        axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
@@ -252,6 +269,83 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
                struct axi_dmac_desc, vdesc.node);
 }
 
+static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
+       struct axi_dmac_sg *sg)
+{
+       if (chan->hw_2d)
+               return sg->x_len * sg->y_len;
+       else
+               return sg->x_len;
+}
+
+static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
+{
+       struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+       struct axi_dmac_desc *desc;
+       struct axi_dmac_sg *sg;
+       u32 xfer_done, len, id, i;
+       bool found_sg;
+
+       do {
+               len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
+               id  = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);
+
+               found_sg = false;
+               list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
+                       for (i = 0; i < desc->num_sgs; i++) {
+                               sg = &desc->sg[i];
+                               if (sg->id == AXI_DMAC_SG_UNUSED)
+                                       continue;
+                               if (sg->id == id) {
+                                       desc->have_partial_xfer = true;
+                                       sg->partial_len = len;
+                                       found_sg = true;
+                                       break;
+                               }
+                       }
+                       if (found_sg)
+                               break;
+               }
+
+               if (found_sg) {
+                       dev_dbg(dmac->dma_dev.dev,
+                               "Found partial segment id=%u, len=%u\n",
+                               id, len);
+               } else {
+                       dev_warn(dmac->dma_dev.dev,
+                                "Not found partial segment id=%u, len=%u\n",
+                                id, len);
+               }
+
+               /* Check if we have any more partial transfers */
+               xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
+               xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);
+
+       } while (!xfer_done);
+}
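
The loop above drains what behaves like a hardware FIFO of (length, ID)
records: each LEN/ID register pair read pops one record, and bit 31 of
AXI_DMAC_REG_TRANSFER_DONE stays set while more records are queued. A
hypothetical trace for two partial segments (values invented for
illustration):

        /*
         * read PARTIAL_XFER_LEN -> 0x200  (512 bytes actually moved)
         * read PARTIAL_XFER_ID  -> 3      (matched against queued sg->id)
         * read TRANSFER_DONE    -> bit 31 set, loop again
         * read PARTIAL_XFER_LEN -> 0x80
         * read PARTIAL_XFER_ID  -> 7
         * read TRANSFER_DONE    -> bit 31 clear, drain complete
         */
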
+
+static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
+       struct axi_dmac_desc *active)
+{
+       struct dmaengine_result *rslt = &active->vdesc.tx_result;
+       unsigned int start = active->num_completed - 1;
+       struct axi_dmac_sg *sg;
+       unsigned int i, total;
+
+       rslt->result = DMA_TRANS_NOERROR;
+       rslt->residue = 0;
+
+       /*
+        * We get here if the last completed segment is partial, which
+        * means we can compute the residue from that segment onwards
+        */
+       for (i = start; i < active->num_sgs; i++) {
+               sg = &active->sg[i];
+               total = axi_dmac_total_sg_bytes(chan, sg);
+               rslt->residue += (total - sg->partial_len);
+       }
+}
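
A worked example for the residue computation, with invented sizes: three
1024-byte segments, the second stopping after 512 bytes. By the time this
runs, num_completed has been advanced past the partial segment, so start
indexes exactly that segment:

        /*
         * sg[0]: fully transferred          -> not counted (i starts at 1)
         * sg[1]: partial_len = 512          -> adds 1024 - 512 = 512
         * sg[2]: never ran, partial_len = 0 -> adds 1024 - 0   = 1024
         * rslt->residue = 1536 bytes not transferred
         */
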
+
 static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
        unsigned int completed_transfers)
 {
@@ -263,6 +357,10 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
        if (!active)
                return false;
 
+       if (chan->hw_partial_xfer &&
+           (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
+               axi_dmac_dequeue_partial_xfers(chan);
+
        do {
                sg = &active->sg[active->num_completed];
                if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
@@ -276,10 +374,14 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
                        start_next = true;
                }
 
+               if (sg->partial_len)
+                       axi_dmac_compute_residue(chan, active);
+
                if (active->cyclic)
                        vchan_cyclic_callback(&active->vdesc);
 
-               if (active->num_completed == active->num_sgs) {
+               if (active->num_completed == active->num_sgs ||
+                   sg->partial_len) {
                        if (active->cyclic) {
                                active->num_completed = 0; /* wrap around */
                        } else {
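
From the client side, the partial result surfaces through the standard
dmaengine completion path, since axi_dmac_compute_residue() fills
vdesc.tx_result. A minimal consumer sketch, with channel setup assumed to
happen elsewhere (names are illustrative, not part of this patch):

        static void rx_done(void *arg, const struct dmaengine_result *res)
        {
                if (res->result == DMA_TRANS_NOERROR && res->residue)
                        pr_info("short DMA: %u bytes missing\n", res->residue);
        }

        /* at submission time */
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_slave_single(chan, buf, len,
                                           DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
        desc->callback_result = rx_done;
        dmaengine_submit(desc);
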
@@ -391,7 +493,7 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
        num_segments = DIV_ROUND_UP(period_len, chan->max_length);
        segment_size = DIV_ROUND_UP(period_len, num_segments);
        /* Take care of alignment */
-       segment_size = ((segment_size - 1) | chan->align_mask) + 1;
+       segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
 
        for (i = 0; i < num_periods; i++) {
                len = period_len;
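
The expression ((segment_size - 1) | chan->length_align_mask) + 1 rounds
segment_size up to the next multiple of the length granularity (the mask
being granularity minus one, for power-of-two granularities). For example:

        /* granularity 4 -> length_align_mask = 0x3 */
        /* segment_size = 1022: (1021 | 3) + 1 = 1023 + 1 = 1024 */
        /* segment_size = 1024: (1023 | 3) + 1 = 1023 + 1 = 1024, unchanged */
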
@@ -561,6 +663,9 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
                desc->sg[0].y_len = 1;
        }
 
+       if (flags & DMA_CYCLIC)
+               desc->cyclic = true;
+
        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
 }
 
@@ -574,6 +679,44 @@ static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
        kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
 }
 
+static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case AXI_DMAC_REG_IRQ_MASK:
+       case AXI_DMAC_REG_IRQ_SOURCE:
+       case AXI_DMAC_REG_IRQ_PENDING:
+       case AXI_DMAC_REG_CTRL:
+       case AXI_DMAC_REG_TRANSFER_ID:
+       case AXI_DMAC_REG_START_TRANSFER:
+       case AXI_DMAC_REG_FLAGS:
+       case AXI_DMAC_REG_DEST_ADDRESS:
+       case AXI_DMAC_REG_SRC_ADDRESS:
+       case AXI_DMAC_REG_X_LENGTH:
+       case AXI_DMAC_REG_Y_LENGTH:
+       case AXI_DMAC_REG_DEST_STRIDE:
+       case AXI_DMAC_REG_SRC_STRIDE:
+       case AXI_DMAC_REG_TRANSFER_DONE:
+       case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
+       case AXI_DMAC_REG_STATUS:
+       case AXI_DMAC_REG_CURRENT_SRC_ADDR:
+       case AXI_DMAC_REG_CURRENT_DEST_ADDR:
+       case AXI_DMAC_REG_PARTIAL_XFER_LEN:
+       case AXI_DMAC_REG_PARTIAL_XFER_ID:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static const struct regmap_config axi_dmac_regmap_config = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
+       .readable_reg = axi_dmac_regmap_rdwr,
+       .writeable_reg = axi_dmac_regmap_rdwr,
+};
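
The regmap handle is discarded in probe below, which suggests it exists
purely to make the register file visible through the regmap debugfs
interface; the driver keeps using its own readl/writel accessors. A sketch
of what keeping the handle would allow (illustrative, not what this patch
does):

        struct regmap *map;

        map = devm_regmap_init_mmio(&pdev->dev, dmac->base,
                                    &axi_dmac_regmap_config);
        if (!IS_ERR(map)) {
                unsigned int status;

                /* same view as /sys/kernel/debug/regmap/<dev>/registers */
                regmap_read(map, AXI_DMAC_REG_STATUS, &status);
        }
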
+
 /*
  * The configuration stored in the devicetree matches the configuration
  * parameters of the peripheral instance and allows the driver to know which
@@ -617,7 +760,7 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
                return ret;
        chan->dest_width = val / 8;
 
-       chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
+       chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;
 
        if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
                chan->direction = DMA_MEM_TO_MEM;
@@ -631,9 +774,12 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
        return 0;
 }
 
-static void axi_dmac_detect_caps(struct axi_dmac *dmac)
+static int axi_dmac_detect_caps(struct axi_dmac *dmac)
 {
        struct axi_dmac_chan *chan = &dmac->chan;
+       unsigned int version;
+
+       version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
 
        axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
        if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
@@ -647,6 +793,35 @@ static void axi_dmac_detect_caps(struct axi_dmac *dmac)
        chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
        if (chan->max_length != UINT_MAX)
                chan->max_length++;
+
+       axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
+       if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
+           chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
+               dev_err(dmac->dma_dev.dev,
+                       "Destination memory-mapped interface not supported.");
+               return -ENODEV;
+       }
+
+       axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
+       if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
+           chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
+               dev_err(dmac->dma_dev.dev,
+                       "Source memory-mapped interface not supported.");
+               return -ENODEV;
+       }
+
+       if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
+               chan->hw_partial_xfer = true;
+
+       if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
+               axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
+               chan->length_align_mask =
+                       axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
+       } else {
+               chan->length_align_mask = chan->address_align_mask;
+       }
+
+       return 0;
 }
 
 static int axi_dmac_probe(struct platform_device *pdev)
@@ -722,7 +897,11 @@ static int axi_dmac_probe(struct platform_device *pdev)
        if (ret < 0)
                return ret;
 
-       axi_dmac_detect_caps(dmac);
+       ret = axi_dmac_detect_caps(dmac);
+       if (ret)
+               goto err_clk_disable;
+
+       dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
 
        axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
 
@@ -742,6 +921,8 @@ static int axi_dmac_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dmac);
 
+       devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
+
        return 0;
 
 err_unregister_of: