diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e5adf5d1c34fcf53dfeab355ee647a97c2765661..e500950dad822ab84c52a6412649f740c321405b 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -138,16 +138,32 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
        dwc->descs_allocated--;
 }
 
-static void dwc_initialize(struct dw_dma_chan *dwc)
+static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc)
+{
+       u32 cfghi = 0;
+       u32 cfglo = 0;
+
+       /* Set default burst alignment */
+       cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
+
+       /* Low 4 bits of the request lines */
+       cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
+       cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
+
+       /* Request line extension (2 bits) */
+       cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
+       cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
+
+       channel_writel(dwc, CFG_LO, cfglo);
+       channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc)
 {
-       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
        u32 cfghi = DWC_CFGH_FIFO_MODE;
        u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
        bool hs_polarity = dwc->dws.hs_polarity;
 
-       if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
-               return;
-
        cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
        cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
 
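
On iDMA 32-bit the request line number is wider than the 4-bit *_PER fields, so dwc_initialize_chan_idma32() splits it: the low 4 bits go into DST_PER/SRC_PER and the next two bits into the *_PER_EXT fields. A minimal stand-alone sketch of that split; the request line value 0x25 is made up for illustration:

    #include <stdio.h>

    /* Split a 6-bit request line the way dwc_initialize_chan_idma32()
     * does: low 4 bits into *_PER, bits 5:4 into *_PER_EXT. */
    int main(void)
    {
        unsigned int src_id = 0x25;              /* hypothetical request line 37 */
        unsigned int per = src_id & 0xf;         /* 0x5 -> SRC_PER field */
        unsigned int ext = (src_id >> 4) & 0x3;  /* 0x2 -> SRC_PER_EXT field */

        printf("PER=%#x PER_EXT=%#x\n", per, ext);
        return 0;
    }
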
@@ -156,6 +172,19 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 
        channel_writel(dwc, CFG_LO, cfglo);
        channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+       if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
+               return;
+
+       if (dw->pdata->is_idma32)
+               dwc_initialize_chan_idma32(dwc);
+       else
+               dwc_initialize_chan_dw(dwc);
 
        /* Enable interrupts */
        channel_set_bit(dw, MASK.XFER, dwc->mask);
@@ -184,6 +213,37 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
                cpu_relax();
 }
 
+static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes,
+                         unsigned int width, size_t *len)
+{
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+       u32 block;
+
+       /* Always in bytes for iDMA 32-bit */
+       if (dw->pdata->is_idma32)
+               width = 0;
+
+       if ((bytes >> width) > dwc->block_size) {
+               block = dwc->block_size;
+               *len = block << width;
+       } else {
+               block = bytes >> width;
+               *len = bytes;
+       }
+
+       return block;
+}
+
+static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
+{
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+       if (dw->pdata->is_idma32)
+               return IDMA32C_CTLH_BLOCK_TS(block);
+
+       return DWC_CTLH_BLOCK_TS(block) << width;
+}
+
 /*----------------------------------------------------------------------*/
 
 /* Perform single block transfer */
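
bytes2block() clamps one transfer chunk to the channel's maximum block size and reports, via *len, how many bytes that chunk covers; block2bytes() is the inverse used when reading progress back from the hardware. On DesignWare the block counter is in items of 2^width bytes, while iDMA 32-bit always counts bytes, which is why width is forced to 0 there. A stand-alone sketch of the clamping arithmetic, assuming a 4095-item block size and 32-bit (width = 2) items:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative copy of the bytes2block() clamping logic, with the
     * channel's maximum block size passed in directly. */
    static unsigned int clamp_block(size_t bytes, unsigned int width,
                                    size_t block_size, size_t *len)
    {
        unsigned int block;

        if ((bytes >> width) > block_size) {
            block = block_size;
            *len = (size_t)block << width;
        } else {
            block = bytes >> width;
            *len = bytes;
        }
        return block;
    }

    int main(void)
    {
        size_t len;
        unsigned int block = clamp_block(65536, 2, 4095, &len);

        printf("block=%u covers %zu bytes\n", block, len);
        return 0;
    }

Run as-is it prints "block=4095 covers 16380 bytes"; the remaining 49156 bytes of the 65536-byte request would be carried by further descriptors.
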
@@ -332,7 +392,7 @@ static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
        u32 ctlhi = channel_readl(dwc, CTL_HI);
        u32 ctllo = channel_readl(dwc, CTL_LO);
 
-       return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+       return block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
 }
 
 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
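
dwc_get_sent() turns the hardware block counter back into bytes: bits 6:4 of CTL_LO hold the source transfer width, so ctllo >> 4 & 7 recovers the item width, and block2bytes() then returns BLOCK_TS << width on DesignWare (for example, BLOCK_TS = 100 with width = 2 gives 400 bytes) or the byte count directly on iDMA 32-bit.
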
@@ -692,10 +752,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                        | DWC_CTLL_FC_M2M;
        prev = first = NULL;
 
-       for (offset = 0; offset < len; offset += xfer_count << src_width) {
-               xfer_count = min_t(size_t, (len - offset) >> src_width,
-                                          dwc->block_size);
-
+       for (offset = 0; offset < len; offset += xfer_count) {
                desc = dwc_desc_get(dwc);
                if (!desc)
                        goto err_desc_get;
@@ -703,8 +760,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                lli_write(desc, sar, src + offset);
                lli_write(desc, dar, dest + offset);
                lli_write(desc, ctllo, ctllo);
-               lli_write(desc, ctlhi, xfer_count);
-               desc->len = xfer_count << src_width;
+               lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count));
+               desc->len = xfer_count;
 
                if (!first) {
                        first = desc;
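
With the clamping moved into bytes2block(), the memcpy loop above now advances offset in bytes: each pass programs one descriptor and bytes2block() reports through xfer_count how many bytes that descriptor covers. The slave_sg paths below are reworked the same way, with the mem/len bookkeeping moved to after the descriptor is queued. A minimal sketch of the resulting chunking, assuming a 4095-item block size, 32-bit items and a made-up 40000-byte request:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative chunking of one memcpy request into hardware blocks,
     * mirroring the rewritten dwc_prep_dma_memcpy() loop. */
    int main(void)
    {
        const size_t block_size = 4095;  /* max items per block */
        const unsigned int width = 2;    /* 32-bit items */
        size_t len = 40000, offset, xfer;

        for (offset = 0; offset < len; offset += xfer) {
            size_t items = (len - offset) >> width;

            if (items > block_size)
                xfer = block_size << width;  /* 16380 bytes */
            else
                xfer = len - offset;

            printf("descriptor covers %zu bytes at offset %zu\n",
                   xfer, offset);
        }
        return 0;
    }

It prints three descriptors of 16380, 16380 and 7240 bytes.
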
@@ -775,7 +832,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
-                       u32             len, dlen, mem;
+                       u32             len, mem;
+                       size_t          dlen;
 
                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
@@ -789,17 +847,8 @@ slave_sg_todev_fill_desc:
 
                        lli_write(desc, sar, mem);
                        lli_write(desc, dar, reg);
+                       lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen));
                        lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
-                       if ((len >> mem_width) > dwc->block_size) {
-                               dlen = dwc->block_size << mem_width;
-                               mem += dlen;
-                               len -= dlen;
-                       } else {
-                               dlen = len;
-                               len = 0;
-                       }
-
-                       lli_write(desc, ctlhi, dlen >> mem_width);
                        desc->len = dlen;
 
                        if (!first) {
@@ -809,6 +858,9 @@ slave_sg_todev_fill_desc:
                                list_add_tail(&desc->desc_node, &first->tx_list);
                        }
                        prev = desc;
+
+                       mem += dlen;
+                       len -= dlen;
                        total_len += dlen;
 
                        if (len)
@@ -828,13 +880,12 @@ slave_sg_todev_fill_desc:
 
                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
-                       u32             len, dlen, mem;
+                       u32             len, mem;
+                       size_t          dlen;
 
                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
 
-                       mem_width = __ffs(data_width | mem | len);
-
 slave_sg_fromdev_fill_desc:
                        desc = dwc_desc_get(dwc);
                        if (!desc)
@@ -842,16 +893,9 @@ slave_sg_fromdev_fill_desc:
 
                        lli_write(desc, sar, reg);
                        lli_write(desc, dar, mem);
+                       lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen));
+                       mem_width = __ffs(data_width | mem | dlen);
                        lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
-                       if ((len >> reg_width) > dwc->block_size) {
-                               dlen = dwc->block_size << reg_width;
-                               mem += dlen;
-                               len -= dlen;
-                       } else {
-                               dlen = len;
-                               len = 0;
-                       }
-                       lli_write(desc, ctlhi, dlen >> reg_width);
                        desc->len = dlen;
 
                        if (!first) {
@@ -861,6 +905,9 @@ slave_sg_fromdev_fill_desc:
                                list_add_tail(&desc->desc_node, &first->tx_list);
                        }
                        prev = desc;
+
+                       mem += dlen;
+                       len -= dlen;
                        total_len += dlen;
 
                        if (len)
@@ -903,25 +950,20 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
 }
 EXPORT_SYMBOL_GPL(dw_dma_filter);
 
-/*
- * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
- * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
- *
- * NOTE: burst size 2 is not supported by controller.
- *
- * This can be done by finding least significant bit set: n & (n - 1)
- */
-static inline void convert_burst(u32 *maxburst)
-{
-       if (*maxburst > 1)
-               *maxburst = fls(*maxburst) - 2;
-       else
-               *maxburst = 0;
-}
-
 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+       struct dma_slave_config *sc = &dwc->dma_sconfig;
+       struct dw_dma *dw = to_dw_dma(chan->device);
+       /*
+        * Fix sconfig's burst size according to dw_dmac. We need to convert
+        * them as:
+        * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+        *
+        * NOTE: burst size 2 is not supported by DesignWare controller.
+        *       iDMA 32-bit supports it.
+        */
+       u32 s = dw->pdata->is_idma32 ? 1 : 2;
 
        /* Check if chan will be configured for slave transfers */
        if (!is_slave_direction(sconfig->direction))
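
Worked out, the conversion described in the comment above (applied to src_maxburst/dst_maxburst in the next hunk) is just the power-of-two exponent of the burst: fls(4) = 3, fls(8) = 4, fls(16) = 5, so with the DesignWare offset s = 2 a maxburst of 4/8/16 becomes 1/2/3 and anything below 4 falls back to 0 (single transfers, since burst 2 is unsupported). On iDMA 32-bit s = 1, so 2/4/8/16 map to 1/2/3/4.
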
@@ -930,28 +972,39 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
        memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
        dwc->direction = sconfig->direction;
 
-       convert_burst(&dwc->dma_sconfig.src_maxburst);
-       convert_burst(&dwc->dma_sconfig.dst_maxburst);
+       sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
+       sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
 
        return 0;
 }
 
-static int dwc_pause(struct dma_chan *chan)
+static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
 {
-       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
-       unsigned long           flags;
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
        unsigned int            count = 20;     /* timeout iterations */
        u32                     cfglo;
 
-       spin_lock_irqsave(&dwc->lock, flags);
-
        cfglo = channel_readl(dwc, CFG_LO);
+       if (dw->pdata->is_idma32) {
+               if (drain)
+                       cfglo |= IDMA32C_CFGL_CH_DRAIN;
+               else
+                       cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
+       }
        channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
        while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
                udelay(2);
 
        set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
+}
 
+static int dwc_pause(struct dma_chan *chan)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       unsigned long           flags;
+
+       spin_lock_irqsave(&dwc->lock, flags);
+       dwc_chan_pause(dwc, false);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        return 0;
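
dwc_chan_pause() suspends the channel and then polls FIFO_EMPTY for at most 20 iterations of udelay(2), roughly 40 us. On iDMA 32-bit the drain argument controls whether the FIFO is flushed while suspended: a plain dwc_pause() clears CH_DRAIN so the transfer can be resumed later, whereas terminate-all (next hunk) passes drain = true before disabling the channel.
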
@@ -993,6 +1046,8 @@ static int dwc_terminate_all(struct dma_chan *chan)
 
        clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
 
+       dwc_chan_pause(dwc, true);
+
        dwc_chan_disable(dw, dwc);
 
        dwc_chan_resume(dwc);
@@ -1085,6 +1140,32 @@ static void dwc_issue_pending(struct dma_chan *chan)
 
 /*----------------------------------------------------------------------*/
 
+/*
+ * Program FIFO size of channels.
+ *
+ * By default full FIFO (1024 bytes) is assigned to channel 0. Here we
+ * slice FIFO on equal parts between channels.
+ */
+static void idma32_fifo_partition(struct dw_dma *dw)
+{
+       u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
+                   IDMA32C_FP_UPDATE;
+       u64 fifo_partition = 0;
+
+       if (!dw->pdata->is_idma32)
+               return;
+
+       /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
+       fifo_partition |= value << 0;
+
+       /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
+       fifo_partition |= value << 32;
+
+       /* Program FIFO Partition registers - 128 bytes for each channel */
+       idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
+       idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
+}
+
 static void dw_dma_off(struct dw_dma *dw)
 {
        unsigned int i;
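
The arithmetic behind the 128-byte slices: the 1024-byte FIFO divided evenly over the 8 channels (0..7) gives 1024 / 8 = 128 bytes each. Each 64-bit FIFO_PARTITION register describes four channels, two per 32-bit half, so the same value (PSIZE_CH0 | PSIZE_CH1 with IDMA32C_FP_UPDATE set) is written into both halves of both registers.
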
@@ -1504,8 +1585,16 @@ int dw_dma_probe(struct dw_dma_chip *chip)
        /* Force dma off, just in case */
        dw_dma_off(dw);
 
+       idma32_fifo_partition(dw);
+
+       /* Device and instance ID for IRQ and DMA pool */
+       if (pdata->is_idma32)
+               snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id);
+       else
+               snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id);
+
        /* Create a pool of consistent memory blocks for hardware descriptors */
-       dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
+       dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
                                         sizeof(struct dw_desc), 4, 0);
        if (!dw->desc_pool) {
                dev_err(chip->dev, "No memory for descriptors dma pool\n");
@@ -1516,7 +1605,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
        tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
 
        err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
-                         "dw_dmac", dw);
+                         dw->name, dw);
        if (err)
                goto err_pdata;
 
@@ -1665,6 +1754,8 @@ int dw_dma_enable(struct dw_dma_chip *chip)
 {
        struct dw_dma *dw = chip->dw;
 
+       idma32_fifo_partition(dw);
+
        dw_dma_on(dw);
        return 0;
 }