dmaengine/dw_dmac: implement pause and resume in dwc_control
author Linus Walleij <linus.walleij@linaro.org>
Tue, 19 Apr 2011 00:31:32 +0000 (08:31 +0800)
committer Vinod Koul <vinod.koul@intel.com>
Fri, 13 May 2011 14:10:15 +0000 (19:40 +0530)
Some peripherals, like amba-pl011, need pause to be implemented in DMA
controller drivers. This change also makes dwc_tx_status() return the correct
status when the channel is paused (a client-side sketch follows the file list
below).

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
drivers/dma/dw_dmac.c
drivers/dma/dw_dmac_regs.h
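
The new commands are reached through the generic dmaengine wrappers from
include/linux/dmaengine.h, which forward to dwc_control() via
chan->device->device_control(). A minimal, hypothetical client-side sketch
(not part of this patch; function and variable names are illustrative only):

    #include <linux/kernel.h>
    #include <linux/dmaengine.h>

    static void example_pause_resume(struct dma_chan *chan, dma_cookie_t cookie)
    {
    	struct dma_tx_state state;

    	/* DMA_PAUSE: dwc_control() sets CH_SUSP and waits for FIFO_EMPTY */
    	dmaengine_pause(chan);

    	/* With this patch, dwc_tx_status() now reports DMA_PAUSED here */
    	if (dmaengine_tx_status(chan, cookie, &state) == DMA_PAUSED)
    		pr_info("channel is paused\n");

    	/* DMA_RESUME: dwc_control() clears CH_SUSP again */
    	dmaengine_resume(chan);
    }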

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 442b98b81e7cae27f8c888a5cf0d65fb36c5192b..eec675bf4f9540128addb797de9305e8d8d33af7 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -862,34 +862,50 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc, *_desc;
        unsigned long           flags;
+       u32                     cfglo;
        LIST_HEAD(list);
 
-       /* Only supports DMA_TERMINATE_ALL */
-       if (cmd != DMA_TERMINATE_ALL)
-               return -ENXIO;
+       if (cmd == DMA_PAUSE) {
+               spin_lock_irqsave(&dwc->lock, flags);
 
-       /*
-        * This is only called when something went wrong elsewhere, so
-        * we don't really care about the data. Just disable the
-        * channel. We still have to poll the channel enable bit due
-        * to AHB/HSB limitations.
-        */
-       spin_lock_irqsave(&dwc->lock, flags);
+               cfglo = channel_readl(dwc, CFG_LO);
+               channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+               while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+                       cpu_relax();
 
-       channel_clear_bit(dw, CH_EN, dwc->mask);
+               dwc->paused = true;
+               spin_unlock_irqrestore(&dwc->lock, flags);
+       } else if (cmd == DMA_RESUME) {
+               if (!dwc->paused)
+                       return 0;
 
-       while (dma_readl(dw, CH_EN) & dwc->mask)
-               cpu_relax();
+               spin_lock_irqsave(&dwc->lock, flags);
 
-       /* active_list entries will end up before queued entries */
-       list_splice_init(&dwc->queue, &list);
-       list_splice_init(&dwc->active_list, &list);
+               cfglo = channel_readl(dwc, CFG_LO);
+               channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+               dwc->paused = false;
 
-       spin_unlock_irqrestore(&dwc->lock, flags);
+               spin_unlock_irqrestore(&dwc->lock, flags);
+       } else if (cmd == DMA_TERMINATE_ALL) {
+               spin_lock_irqsave(&dwc->lock, flags);
 
-       /* Flush all pending and queued descriptors */
-       list_for_each_entry_safe(desc, _desc, &list, desc_node)
-               dwc_descriptor_complete(dwc, desc, false);
+               channel_clear_bit(dw, CH_EN, dwc->mask);
+               while (dma_readl(dw, CH_EN) & dwc->mask)
+                       cpu_relax();
+
+               dwc->paused = false;
+
+               /* active_list entries will end up before queued entries */
+               list_splice_init(&dwc->queue, &list);
+               list_splice_init(&dwc->active_list, &list);
+
+               spin_unlock_irqrestore(&dwc->lock, flags);
+
+               /* Flush all pending and queued descriptors */
+               list_for_each_entry_safe(desc, _desc, &list, desc_node)
+                       dwc_descriptor_complete(dwc, desc, false);
+       } else
+               return -ENXIO;
 
        return 0;
 }
@@ -923,6 +939,9 @@ dwc_tx_status(struct dma_chan *chan,
        else
                dma_set_tx_state(txstate, last_complete, last_used, 0);
 
+       if (dwc->paused)
+               return DMA_PAUSED;
+
        return ret;
 }
 
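For reference, the CFG_LO bits driven by the pause path are already defined in
dw_dmac_regs.h (values per the DesignWare AHB DMAC databook; quoted here for
context, not part of this diff):

    #define DWC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
    #define DWC_CFGL_FIFO_EMPTY	(1 << 9)

CH_SUSP suspends the channel gracefully; the pause path then polls FIFO_EMPTY
so no data is left in the channel FIFO before the channel is reported paused.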
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 720f821527f8aeebd8738c34f1f5c87a5a34f867..c968597c32aba76243927f96f7147056139045a2 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -138,6 +138,7 @@ struct dw_dma_chan {
        void __iomem            *ch_regs;
        u8                      mask;
        u8                      priority;
+       bool                    paused;
 
        spinlock_t              lock;