/* Source: drivers/dma/bcm2708-dmaengine.c, from the mirror_ubuntu-artful-kernel
 * tree on git.proxmox.com; commit subject: "dmaengine: Add support for BCM2708". */
1 /*
2 * BCM2708 legacy DMA API
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/platform_data/dma-bcm2708.h>
20 #include <linux/platform_device.h>
21 #include <linux/slab.h>
22 #include <linux/io.h>
23 #include <linux/spinlock.h>
24
25 #include "virt-dma.h"
26
#define CACHE_LINE_MASK 31	/* low 5 bits: offsets within a 32-byte cache line */
#define DEFAULT_DMACHAN_BITMAP 0x10	/* channel 4 only */

/* valid only for channels 0 - 14, 15 has its own base address */
#define BCM2708_DMA_CHAN(n)	((n) << 8)	/* base address: each channel's register block is 0x100 bytes */
#define BCM2708_DMA_CHANIO(dma_base, n) \
	((void __iomem *)((char *)(dma_base) + BCM2708_DMA_CHAN(n)))
34
/* State for the (single) DMA manager instance. */
struct vc_dmaman {
	void __iomem *dma_base;	/* mapped base of the DMA register block */
	u32 chan_available; /* bitmap of available channels */
	u32 has_feature[BCM_DMA_FEATURE_COUNT]; /* bitmap of feature presence */
	struct mutex lock;	/* serializes channel alloc/free (protects chan_available) */
};
41
/* Singleton manager state, published by bcm_dmaman_probe(). */
static struct device *dmaman_dev;	/* we assume there's only one! */
static struct vc_dmaman *g_dmaman;	/* DMA manager */
44
45 /* DMA Auxiliary Functions */
46
47 /* A DMA buffer on an arbitrary boundary may separate a cache line into a
48 section inside the DMA buffer and another section outside it.
49 Even if we flush DMA buffers from the cache there is always the chance that
50 during a DMA someone will access the part of a cache line that is outside
51 the DMA buffer - which will then bring in unwelcome data.
52 Without being able to dictate our own buffer pools we must insist that
53 DMA buffers consist of a whole number of cache lines.
54 */
55 extern int bcm_sg_suitable_for_dma(struct scatterlist *sg_ptr, int sg_len)
56 {
57 int i;
58
59 for (i = 0; i < sg_len; i++) {
60 if (sg_ptr[i].offset & CACHE_LINE_MASK ||
61 sg_ptr[i].length & CACHE_LINE_MASK)
62 return 0;
63 }
64
65 return 1;
66 }
67 EXPORT_SYMBOL_GPL(bcm_sg_suitable_for_dma);
68
/* Start a DMA on the given channel: program the control-block address,
 * then set the ACTIVE bit in the channel's CS register.
 * control_block is presumably the bus address of the first control block
 * in the chain - TODO confirm against callers. */
extern void bcm_dma_start(void __iomem *dma_chan_base,
		dma_addr_t control_block)
{
	/* ensure prior CPU writes (e.g. control-block contents) have been
	 * pushed out before the DMA engine is started */
	dsb(sy); /* ARM data synchronization (push) operation */

	writel(control_block, dma_chan_base + BCM2708_DMA_ADDR);
	writel(BCM2708_DMA_ACTIVE, dma_chan_base + BCM2708_DMA_CS);
}
EXPORT_SYMBOL_GPL(bcm_dma_start);
78
/* Spin until the channel's ACTIVE bit clears, i.e. until any current DMA
 * has finished.  Note this may spin indefinitely if the transfer never
 * completes (see bcm_dma_abort() for the DREQ-hang case). */
extern void bcm_dma_wait_idle(void __iomem *dma_chan_base)
{
	/* push outstanding CPU writes before polling the status register */
	dsb(sy);

	/* ugly busy wait only option for now */
	while (readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_ACTIVE)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(bcm_dma_wait_idle);
88
/* Non-blocking status check: returns true while the channel's ACTIVE bit
 * is set in the CS register. */
extern bool bcm_dma_is_busy(void __iomem *dma_chan_base)
{
	/* push outstanding CPU writes before reading the status register */
	dsb(sy);

	return readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_ACTIVE;
}
EXPORT_SYMBOL_GPL(bcm_dma_is_busy);
96
/* Complete an ongoing DMA (assuming its results are to be ignored)
   Does nothing if there is no DMA in progress.
   This routine waits for the current AXI transfer to complete before
   terminating the current DMA. If the current transfer is hung on a DREQ used
   by an uncooperative peripheral the AXI transfer may never complete. In this
   case the routine times out and return a non-zero error code.
   Use of this routine doesn't guarantee that the ongoing or aborted DMA
   does not produce an interrupt.

   Returns 0 on success, -ETIMEDOUT if the pause never took effect.
*/
extern int bcm_dma_abort(void __iomem *dma_chan_base)
{
	unsigned long int cs;
	int rc = 0;

	cs = readl(dma_chan_base + BCM2708_DMA_CS);

	if (BCM2708_DMA_ACTIVE & cs) {
		long int timeout = 10000;

		/* write 0 to the active bit - pause the DMA */
		writel(0, dma_chan_base + BCM2708_DMA_CS);

		/* wait for any current AXI transfer to complete */
		/* NOTE(review): the first loop test uses the CS value read
		 * BEFORE the pause was requested; if ISPAUSED was clear at
		 * that point the loop exits without ever re-reading CS, and
		 * the following checks also act on that stale value.
		 * Confirm intended polarity/ordering against the BCM2835
		 * DMA controller documentation before changing anything. */
		while (0 != (cs & BCM2708_DMA_ISPAUSED) && --timeout >= 0)
			cs = readl(dma_chan_base + BCM2708_DMA_CS);

		if (0 != (cs & BCM2708_DMA_ISPAUSED)) {
			/* we'll un-pause when we set of our next DMA */
			rc = -ETIMEDOUT;

		} else if (BCM2708_DMA_ACTIVE & cs) {
			/* terminate the control block chain */
			writel(0, dma_chan_base + BCM2708_DMA_NEXTCB);

			/* abort the whole DMA */
			writel(BCM2708_DMA_ABORT | BCM2708_DMA_ACTIVE,
			       dma_chan_base + BCM2708_DMA_CS);
		}
	}

	return rc;
}
EXPORT_SYMBOL_GPL(bcm_dma_abort);
140
141 /* DMA Manager Device Methods */
142
143 static void vc_dmaman_init(struct vc_dmaman *dmaman, void __iomem *dma_base,
144 u32 chans_available)
145 {
146 dmaman->dma_base = dma_base;
147 dmaman->chan_available = chans_available;
148 dmaman->has_feature[BCM_DMA_FEATURE_FAST_ORD] = 0x0c; /* 2 & 3 */
149 dmaman->has_feature[BCM_DMA_FEATURE_BULK_ORD] = 0x01; /* 0 */
150 dmaman->has_feature[BCM_DMA_FEATURE_NORMAL_ORD] = 0xfe; /* 1 to 7 */
151 dmaman->has_feature[BCM_DMA_FEATURE_LITE_ORD] = 0x7f00; /* 8 to 14 */
152 }
153
154 static int vc_dmaman_chan_alloc(struct vc_dmaman *dmaman,
155 unsigned required_feature_set)
156 {
157 u32 chans;
158 int chan = 0;
159 int feature;
160
161 chans = dmaman->chan_available;
162 for (feature = 0; feature < BCM_DMA_FEATURE_COUNT; feature++)
163 /* select the subset of available channels with the desired
164 features */
165 if (required_feature_set & (1 << feature))
166 chans &= dmaman->has_feature[feature];
167
168 if (!chans)
169 return -ENOENT;
170
171 /* return the ordinal of the first channel in the bitmap */
172 while (chans != 0 && (chans & 1) == 0) {
173 chans >>= 1;
174 chan++;
175 }
176 /* claim the channel */
177 dmaman->chan_available &= ~(1 << chan);
178
179 return chan;
180 }
181
182 static int vc_dmaman_chan_free(struct vc_dmaman *dmaman, int chan)
183 {
184 if (chan < 0)
185 return -EINVAL;
186
187 if ((1 << chan) & dmaman->chan_available)
188 return -EIDRM;
189
190 dmaman->chan_available |= (1 << chan);
191
192 return 0;
193 }
194
195 /* DMA Manager Monitor */
196
197 extern int bcm_dma_chan_alloc(unsigned required_feature_set,
198 void __iomem **out_dma_base, int *out_dma_irq)
199 {
200 struct vc_dmaman *dmaman = g_dmaman;
201 struct platform_device *pdev = to_platform_device(dmaman_dev);
202 struct resource *r;
203 int chan;
204
205 if (!dmaman_dev)
206 return -ENODEV;
207
208 mutex_lock(&dmaman->lock);
209 chan = vc_dmaman_chan_alloc(dmaman, required_feature_set);
210 if (chan < 0)
211 goto out;
212
213 r = platform_get_resource(pdev, IORESOURCE_IRQ, (unsigned int)chan);
214 if (!r) {
215 dev_err(dmaman_dev, "failed to get irq for DMA channel %d\n",
216 chan);
217 vc_dmaman_chan_free(dmaman, chan);
218 chan = -ENOENT;
219 goto out;
220 }
221
222 *out_dma_base = BCM2708_DMA_CHANIO(dmaman->dma_base, chan);
223 *out_dma_irq = r->start;
224 dev_dbg(dmaman_dev,
225 "Legacy API allocated channel=%d, base=%p, irq=%i\n",
226 chan, *out_dma_base, *out_dma_irq);
227
228 out:
229 mutex_unlock(&dmaman->lock);
230
231 return chan;
232 }
233 EXPORT_SYMBOL_GPL(bcm_dma_chan_alloc);
234
235 extern int bcm_dma_chan_free(int channel)
236 {
237 struct vc_dmaman *dmaman = g_dmaman;
238 int rc;
239
240 if (!dmaman_dev)
241 return -ENODEV;
242
243 mutex_lock(&dmaman->lock);
244 rc = vc_dmaman_chan_free(dmaman, channel);
245 mutex_unlock(&dmaman->lock);
246
247 return rc;
248 }
249 EXPORT_SYMBOL_GPL(bcm_dma_chan_free);
250
251 int bcm_dmaman_probe(struct platform_device *pdev, void __iomem *base,
252 u32 chans_available)
253 {
254 struct device *dev = &pdev->dev;
255 struct vc_dmaman *dmaman;
256
257 dmaman = devm_kzalloc(dev, sizeof(*dmaman), GFP_KERNEL);
258 if (!dmaman)
259 return -ENOMEM;
260
261 mutex_init(&dmaman->lock);
262 vc_dmaman_init(dmaman, base, chans_available);
263 g_dmaman = dmaman;
264 dmaman_dev = dev;
265
266 dev_info(dev, "DMA legacy API manager at %p, dmachans=0x%x\n",
267 base, chans_available);
268
269 return 0;
270 }
271 EXPORT_SYMBOL(bcm_dmaman_probe);
272
273 int bcm_dmaman_remove(struct platform_device *pdev)
274 {
275 dmaman_dev = NULL;
276
277 return 0;
278 }
279 EXPORT_SYMBOL(bcm_dmaman_remove);
280
281 MODULE_LICENSE("GPL");