/*
 * linux/arch/arm/plat-mxc/dma-v1.c
 *
 * i.MX DMA registration and IRQ dispatching
 *
 * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
 * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
 * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/dma-v1.h>
38 | ||
39 | #define DMA_DCR 0x00 /* Control Register */ | |
40 | #define DMA_DISR 0x04 /* Interrupt status Register */ | |
41 | #define DMA_DIMR 0x08 /* Interrupt mask Register */ | |
42 | #define DMA_DBTOSR 0x0c /* Burst timeout status Register */ | |
43 | #define DMA_DRTOSR 0x10 /* Request timeout Register */ | |
44 | #define DMA_DSESR 0x14 /* Transfer Error Status Register */ | |
45 | #define DMA_DBOSR 0x18 /* Buffer overflow status Register */ | |
46 | #define DMA_DBTOCR 0x1c /* Burst timeout control Register */ | |
47 | #define DMA_WSRA 0x40 /* W-Size Register A */ | |
48 | #define DMA_XSRA 0x44 /* X-Size Register A */ | |
49 | #define DMA_YSRA 0x48 /* Y-Size Register A */ | |
50 | #define DMA_WSRB 0x4c /* W-Size Register B */ | |
51 | #define DMA_XSRB 0x50 /* X-Size Register B */ | |
52 | #define DMA_YSRB 0x54 /* Y-Size Register B */ | |
53 | #define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */ | |
54 | #define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */ | |
55 | #define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */ | |
56 | #define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */ | |
57 | #define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */ | |
58 | #define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */ | |
59 | #define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */ | |
60 | #define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */ | |
61 | #define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */ | |
62 | ||
63 | #define DCR_DRST (1<<1) | |
64 | #define DCR_DEN (1<<0) | |
65 | #define DBTOCR_EN (1<<15) | |
66 | #define DBTOCR_CNT(x) ((x) & 0x7fff) | |
67 | #define CNTR_CNT(x) ((x) & 0xffffff) | |
68 | #define CCR_ACRPT (1<<14) | |
69 | #define CCR_DMOD_LINEAR (0x0 << 12) | |
70 | #define CCR_DMOD_2D (0x1 << 12) | |
71 | #define CCR_DMOD_FIFO (0x2 << 12) | |
72 | #define CCR_DMOD_EOBFIFO (0x3 << 12) | |
73 | #define CCR_SMOD_LINEAR (0x0 << 10) | |
74 | #define CCR_SMOD_2D (0x1 << 10) | |
75 | #define CCR_SMOD_FIFO (0x2 << 10) | |
76 | #define CCR_SMOD_EOBFIFO (0x3 << 10) | |
77 | #define CCR_MDIR_DEC (1<<9) | |
78 | #define CCR_MSEL_B (1<<8) | |
79 | #define CCR_DSIZ_32 (0x0 << 6) | |
80 | #define CCR_DSIZ_8 (0x1 << 6) | |
81 | #define CCR_DSIZ_16 (0x2 << 6) | |
82 | #define CCR_SSIZ_32 (0x0 << 4) | |
83 | #define CCR_SSIZ_8 (0x1 << 4) | |
84 | #define CCR_SSIZ_16 (0x2 << 4) | |
85 | #define CCR_REN (1<<3) | |
86 | #define CCR_RPT (1<<2) | |
87 | #define CCR_FRC (1<<1) | |
88 | #define CCR_CEN (1<<0) | |
89 | #define RTOR_EN (1<<15) | |
90 | #define RTOR_CLK (1<<14) | |
91 | #define RTOR_PSC (1<<13) | |
92 | ||
93 | /* | |
94 | * struct imx_dma_channel - i.MX specific DMA extension | |
95 | * @name: name specified by DMA client | |
96 | * @irq_handler: client callback for end of transfer | |
97 | * @err_handler: client callback for error condition | |
98 | * @data: clients context data for callbacks | |
99 | * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE | |
100 | * @sg: pointer to the actual read/written chunk for scatter-gather emulation | |
101 | * @resbytes: total residual number of bytes to transfer | |
102 | * (it can be lower or same as sum of SG mapped chunk sizes) | |
103 | * @sgcount: number of chunks to be read/written | |
104 | * | |
105 | * Structure is used for IMX DMA processing. It would be probably good | |
106 | * @struct dma_struct in the future for external interfacing and use | |
107 | * @struct imx_dma_channel only as extension to it. | |
108 | */ | |
109 | ||
110 | struct imx_dma_channel { | |
111 | const char *name; | |
112 | void (*irq_handler) (int, void *); | |
113 | void (*err_handler) (int, void *, int errcode); | |
114 | void (*prog_handler) (int, void *, struct scatterlist *); | |
115 | void *data; | |
116 | unsigned int dma_mode; | |
117 | struct scatterlist *sg; | |
118 | unsigned int resbytes; | |
119 | int dma_num; | |
120 | ||
121 | int in_use; | |
122 | ||
123 | u32 ccr_from_device; | |
124 | u32 ccr_to_device; | |
125 | ||
126 | struct timer_list watchdog; | |
127 | ||
128 | int hw_chaining; | |
129 | }; | |
130 | ||
131 | static void __iomem *imx_dmav1_baseaddr; | |
132 | ||
133 | static void imx_dmav1_writel(unsigned val, unsigned offset) | |
134 | { | |
135 | __raw_writel(val, imx_dmav1_baseaddr + offset); | |
136 | } | |
137 | ||
138 | static unsigned imx_dmav1_readl(unsigned offset) | |
139 | { | |
140 | return __raw_readl(imx_dmav1_baseaddr + offset); | |
141 | } | |
142 | ||
143 | static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS]; | |
144 | ||
145 | static struct clk *dma_clk; | |
146 | ||
147 | static int imx_dma_hw_chain(struct imx_dma_channel *imxdma) | |
148 | { | |
149 | if (cpu_is_mx27()) | |
150 | return imxdma->hw_chaining; | |
151 | else | |
152 | return 0; | |
153 | } | |
154 | ||
155 | /* | |
156 | * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation | |
157 | */ | |
158 | static inline int imx_dma_sg_next(int channel, struct scatterlist *sg) | |
159 | { | |
160 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | |
161 | unsigned long now; | |
162 | ||
163 | if (!imxdma->name) { | |
164 | printk(KERN_CRIT "%s: called for not allocated channel %d\n", | |
165 | __func__, channel); | |
166 | return 0; | |
167 | } | |
168 | ||
169 | now = min(imxdma->resbytes, sg->length); | |
170 | if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP) | |
171 | imxdma->resbytes -= now; | |
172 | ||
173 | if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ) | |
174 | imx_dmav1_writel(sg->dma_address, DMA_DAR(channel)); | |
175 | else | |
176 | imx_dmav1_writel(sg->dma_address, DMA_SAR(channel)); | |
177 | ||
178 | imx_dmav1_writel(now, DMA_CNTR(channel)); | |
179 | ||
180 | pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, " | |
181 | "size 0x%08x\n", channel, | |
182 | imx_dmav1_readl(DMA_DAR(channel)), | |
183 | imx_dmav1_readl(DMA_SAR(channel)), | |
184 | imx_dmav1_readl(DMA_CNTR(channel))); | |
185 | ||
186 | return now; | |
187 | } | |
188 | ||
189 | /** | |
190 | * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from | |
191 | * device transfer | |
192 | * | |
193 | * @channel: i.MX DMA channel number | |
194 | * @dma_address: the DMA/physical memory address of the linear data block | |
195 | * to transfer | |
196 | * @dma_length: length of the data block in bytes | |
197 | * @dev_addr: physical device port address | |
198 | * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory | |
199 | * or %DMA_MODE_WRITE from memory to the device | |
200 | * | |
201 | * Return value: if incorrect parameters are provided -%EINVAL. | |
202 | * Zero indicates success. | |
203 | */ | |
204 | int | |
205 | imx_dma_setup_single(int channel, dma_addr_t dma_address, | |
206 | unsigned int dma_length, unsigned int dev_addr, | |
207 | unsigned int dmamode) | |
208 | { | |
209 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | |
210 | ||
211 | imxdma->sg = NULL; | |
212 | imxdma->dma_mode = dmamode; | |
213 | ||
214 | if (!dma_address) { | |
215 | printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n", | |
216 | channel); | |
217 | return -EINVAL; | |
218 | } | |
219 | ||
220 | if (!dma_length) { | |
221 | printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n", | |
222 | channel); | |
223 | return -EINVAL; | |
224 | } | |
225 | ||
226 | if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) { | |
227 | pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d " | |
228 | "dev_addr=0x%08x for read\n", | |
229 | channel, __func__, (unsigned int)dma_address, | |
230 | dma_length, dev_addr); | |
231 | ||
232 | imx_dmav1_writel(dev_addr, DMA_SAR(channel)); | |
233 | imx_dmav1_writel(dma_address, DMA_DAR(channel)); | |
234 | imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel)); | |
235 | } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) { | |
236 | pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d " | |
237 | "dev_addr=0x%08x for write\n", | |
238 | channel, __func__, (unsigned int)dma_address, | |
239 | dma_length, dev_addr); | |
240 | ||
241 | imx_dmav1_writel(dma_address, DMA_SAR(channel)); | |
242 | imx_dmav1_writel(dev_addr, DMA_DAR(channel)); | |
243 | imx_dmav1_writel(imxdma->ccr_to_device, | |
244 | DMA_CCR(channel)); | |
245 | } else { | |
246 | printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n", | |
247 | channel); | |
248 | return -EINVAL; | |
249 | } | |
250 | ||
251 | imx_dmav1_writel(dma_length, DMA_CNTR(channel)); | |
252 | ||
253 | return 0; | |
254 | } | |
255 | EXPORT_SYMBOL(imx_dma_setup_single); | |
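
/*
 * Example (illustrative sketch, not part of the original driver): using
 * imx_dma_setup_single() for both transfer directions. MY_DEV_RX_FIFO_ADDR,
 * MY_DEV_TX_FIFO_ADDR, rx_buf_dma and tx_buf_dma are hypothetical
 * placeholders for a peripheral's physical FIFO addresses and DMA-mapped
 * buffers; the channel is assumed to have been requested and configured
 * beforehand via imx_dma_config_channel() and imx_dma_config_burstlen().
 *
 *	Reading from the device into memory:
 *
 *	ret = imx_dma_setup_single(channel, rx_buf_dma, len,
 *				   MY_DEV_RX_FIFO_ADDR, DMA_MODE_READ);
 *	if (!ret)
 *		imx_dma_enable(channel);
 *
 *	Writing from memory to the device:
 *
 *	ret = imx_dma_setup_single(channel, tx_buf_dma, len,
 *				   MY_DEV_TX_FIFO_ADDR, DMA_MODE_WRITE);
 *	if (!ret)
 *		imx_dma_enable(channel);
 */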
256 | ||
257 | /** | |
258 | * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer | |
259 | * @channel: i.MX DMA channel number | |
260 | * @sg: pointer to the scatter-gather list/vector | |
261 | * @sgcount: scatter-gather list hungs count | |
262 | * @dma_length: total length of the transfer request in bytes | |
263 | * @dev_addr: physical device port address | |
264 | * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory | |
265 | * or %DMA_MODE_WRITE from memory to the device | |
266 | * | |
267 | * The function sets up DMA channel state and registers to be ready for | |
268 | * transfer specified by provided parameters. The scatter-gather emulation | |
269 | * is set up according to the parameters. | |
270 | * | |
271 | * The full preparation of the transfer requires setup of more register | |
272 | * by the caller before imx_dma_enable() can be called. | |
273 | * | |
274 | * %BLR(channel) holds transfer burst length in bytes, 0 means 64 bytes | |
275 | * | |
276 | * %RSSR(channel) has to be set to the DMA request line source %DMA_REQ_xxx | |
277 | * | |
278 | * %CCR(channel) has to specify transfer parameters, the next settings is | |
279 | * typical for linear or simple scatter-gather transfers if %DMA_MODE_READ is | |
280 | * specified | |
281 | * | |
282 | * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x | |
283 | * | |
284 | * The typical setup for %DMA_MODE_WRITE is specified by next options | |
285 | * combination | |
286 | * | |
287 | * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x | |
288 | * | |
289 | * Be careful here and do not mistakenly mix source and target device | |
290 | * port sizes constants, they are really different: | |
291 | * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32, | |
292 | * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32 | |
293 | * | |
294 | * Return value: if incorrect parameters are provided -%EINVAL. | |
295 | * Zero indicates success. | |
296 | */ | |
int
imx_dma_setup_sg(int channel,
		 struct scatterlist *sg, unsigned int sgcount,
		 unsigned int dma_length, unsigned int dev_addr,
		 unsigned int dmamode)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	if (imxdma->in_use)
		return -EBUSY;

	imxdma->sg = sg;
	imxdma->dma_mode = dmamode;
	imxdma->resbytes = dma_length;

	if (!sg || !sgcount) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
		       channel);
		return -EINVAL;
	}

	if (!sg->length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			"dev_addr=0x%08x for read\n",
			channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			"dev_addr=0x%08x for write\n",
			channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imx_dma_sg_next(channel, sg);

	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_sg);
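
/*
 * Example (illustrative sketch, not part of the original driver): a typical
 * scatter-gather read from a peripheral FIFO. "my_dev" with its sglist and
 * sg_len, total_len, MY_DEV_RX_FIFO_ADDR and DMA_REQ_MY_DEV are hypothetical
 * placeholders. Instead of programming %CCR(channel) and %RSSR(channel) by
 * hand, the sketch assumes the IMX_DMA_MEMSIZE_x / IMX_DMA_TYPE_x
 * configuration values accepted by imx_dma_config_channel().
 *
 *	nents = dma_map_sg(dev, my_dev->sglist, my_dev->sg_len,
 *			   DMA_FROM_DEVICE);
 *	imx_dma_config_channel(channel,
 *			       IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
 *			       IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
 *			       DMA_REQ_MY_DEV, 0);
 *	imx_dma_config_burstlen(channel, 16);
 *	ret = imx_dma_setup_sg(channel, my_dev->sglist, nents, total_len,
 *			       MY_DEV_RX_FIFO_ADDR, DMA_MODE_READ);
 *	if (!ret)
 *		imx_dma_enable(channel);
 */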
349 | ||
350 | int | |
351 | imx_dma_config_channel(int channel, unsigned int config_port, | |
352 | unsigned int config_mem, unsigned int dmareq, int hw_chaining) | |
353 | { | |
354 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | |
355 | u32 dreq = 0; | |
356 | ||
357 | imxdma->hw_chaining = 0; | |
358 | ||
359 | if (hw_chaining) { | |
360 | imxdma->hw_chaining = 1; | |
361 | if (!imx_dma_hw_chain(imxdma)) | |
362 | return -EINVAL; | |
363 | } | |
364 | ||
365 | if (dmareq) | |
366 | dreq = CCR_REN; | |
367 | ||
368 | imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq; | |
369 | imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq; | |
370 | ||
371 | imx_dmav1_writel(dmareq, DMA_RSSR(channel)); | |
372 | ||
373 | return 0; | |
374 | } | |
375 | EXPORT_SYMBOL(imx_dma_config_channel); | |
376 | ||
377 | void imx_dma_config_burstlen(int channel, unsigned int burstlen) | |
378 | { | |
379 | imx_dmav1_writel(burstlen, DMA_BLR(channel)); | |
380 | } | |
381 | EXPORT_SYMBOL(imx_dma_config_burstlen); | |
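
/*
 * Explanatory note on imx_dma_config_channel() above (added commentary, not
 * from the original file): config_port and config_mem are expected in the
 * source-side CCR field positions (SSIZ at bits 4-5, SMOD at bits 10-11).
 * Shifting a value left by two moves it into the destination-side fields
 * (DSIZ at bits 6-7, DMOD at bits 12-13), which is why the two cached CCR
 * values are built as
 *
 *	ccr_from_device = config_port | (config_mem << 2) | dreq;
 *	ccr_to_device   = config_mem  | (config_port << 2) | dreq;
 *
 * For example, a hypothetical 16-bit FIFO port paired with 32-bit linear
 * memory would be described as
 *
 *	config_port = CCR_SSIZ_16 | CCR_SMOD_FIFO;
 *	config_mem  = CCR_SSIZ_32 | CCR_SMOD_LINEAR;
 */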
382 | ||
383 | /** | |
384 | * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification | |
385 | * handlers | |
386 | * @channel: i.MX DMA channel number | |
387 | * @irq_handler: the pointer to the function called if the transfer | |
388 | * ends successfully | |
389 | * @err_handler: the pointer to the function called if the premature | |
390 | * end caused by error occurs | |
391 | * @data: user specified value to be passed to the handlers | |
392 | */ | |
393 | int | |
394 | imx_dma_setup_handlers(int channel, | |
395 | void (*irq_handler) (int, void *), | |
396 | void (*err_handler) (int, void *, int), | |
397 | void *data) | |
398 | { | |
399 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | |
400 | unsigned long flags; | |
401 | ||
402 | if (!imxdma->name) { | |
403 | printk(KERN_CRIT "%s: called for not allocated channel %d\n", | |
404 | __func__, channel); | |
405 | return -ENODEV; | |
406 | } | |
407 | ||
408 | local_irq_save(flags); | |
409 | imx_dmav1_writel(1 << channel, DMA_DISR); | |
410 | imxdma->irq_handler = irq_handler; | |
411 | imxdma->err_handler = err_handler; | |
412 | imxdma->data = data; | |
413 | local_irq_restore(flags); | |
414 | return 0; | |
415 | } | |
416 | EXPORT_SYMBOL(imx_dma_setup_handlers); | |
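
/*
 * Example (illustrative sketch, not part of the original driver): client
 * callbacks as they would be registered with imx_dma_setup_handlers().
 * "my_dev" and its members are hypothetical; errcode carries the
 * IMX_DMA_ERR_x values passed in by dma_err_handler() below.
 *
 *	static void my_dev_dma_done(int channel, void *data)
 *	{
 *		struct my_dev *md = data;
 *
 *		complete(&md->dma_done);
 *	}
 *
 *	static void my_dev_dma_error(int channel, void *data, int errcode)
 *	{
 *		struct my_dev *md = data;
 *
 *		dev_err(md->dev, "DMA error 0x%x on channel %d\n",
 *			errcode, channel);
 *	}
 *
 *	...
 *	imx_dma_setup_handlers(channel, my_dev_dma_done,
 *			       my_dev_dma_error, my_dev);
 */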
417 | ||
418 | /** | |
419 | * imx_dma_setup_progression_handler - setup i.MX DMA channel progression | |
420 | * handlers | |
421 | * @channel: i.MX DMA channel number | |
422 | * @prog_handler: the pointer to the function called if the transfer progresses | |
423 | */ | |
424 | int | |
425 | imx_dma_setup_progression_handler(int channel, | |
426 | void (*prog_handler) (int, void*, struct scatterlist*)) | |
427 | { | |
428 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | |
429 | unsigned long flags; | |
430 | ||
431 | if (!imxdma->name) { | |
432 | printk(KERN_CRIT "%s: called for not allocated channel %d\n", | |
433 | __func__, channel); | |
434 | return -ENODEV; | |
435 | } | |
436 | ||
437 | local_irq_save(flags); | |
438 | imxdma->prog_handler = prog_handler; | |
439 | local_irq_restore(flags); | |
440 | return 0; | |
441 | } | |
442 | EXPORT_SYMBOL(imx_dma_setup_progression_handler); | |
443 | ||
444 | /** | |
445 | * imx_dma_enable - function to start i.MX DMA channel operation | |
446 | * @channel: i.MX DMA channel number | |
447 | * | |
448 | * The channel has to be allocated by driver through imx_dma_request() | |
449 | * or imx_dma_request_by_prio() function. | |
450 | * The transfer parameters has to be set to the channel registers through | |
451 | * call of the imx_dma_setup_single() or imx_dma_setup_sg() function | |
452 | * and registers %BLR(channel), %RSSR(channel) and %CCR(channel) has to | |
453 | * be set prior this function call by the channel user. | |
454 | */ | |
455 | void imx_dma_enable(int channel) | |
456 | { | |
457 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | |
458 | unsigned long flags; | |
459 | ||
460 | pr_debug("imxdma%d: imx_dma_enable\n", channel); | |
461 | ||
462 | if (!imxdma->name) { | |
463 | printk(KERN_CRIT "%s: called for not allocated channel %d\n", | |
464 | __func__, channel); | |
465 | return; | |
466 | } | |
467 | ||
468 | if (imxdma->in_use) | |
469 | return; | |
470 | ||
471 | local_irq_save(flags); | |
472 | ||
473 | imx_dmav1_writel(1 << channel, DMA_DISR); | |
474 | imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR); | |
475 | imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN | | |
476 | CCR_ACRPT, DMA_CCR(channel)); | |
477 | ||
478 | if ((cpu_is_mx21() || cpu_is_mx27()) && | |
479 | imxdma->sg && imx_dma_hw_chain(imxdma)) { | |
480 | imxdma->sg = sg_next(imxdma->sg); | |
481 | if (imxdma->sg) { | |
482 | u32 tmp; | |
483 | imx_dma_sg_next(channel, imxdma->sg); | |
484 | tmp = imx_dmav1_readl(DMA_CCR(channel)); | |
485 | imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT, | |
486 | DMA_CCR(channel)); | |
487 | } | |
488 | } | |
489 | imxdma->in_use = 1; | |
490 | ||
491 | local_irq_restore(flags); | |
492 | } | |
493 | EXPORT_SYMBOL(imx_dma_enable); | |
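
/*
 * Example (illustrative sketch, not part of the original driver): the full
 * call sequence required by the comment above, from channel allocation to
 * enabling the transfer and eventually releasing the channel. "my-dev",
 * DMA_REQ_MY_DEV, MY_DEV_TX_FIFO_ADDR and the my_dev_dma_* callbacks are
 * hypothetical placeholders.
 *
 *	channel = imx_dma_request_by_prio("my-dev", DMA_PRIO_HIGH);
 *	if (channel < 0)
 *		return channel;
 *
 *	imx_dma_config_channel(channel,
 *			       IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_FIFO,
 *			       IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
 *			       DMA_REQ_MY_DEV, 0);
 *	imx_dma_config_burstlen(channel, 32);
 *	imx_dma_setup_handlers(channel, my_dev_dma_done,
 *			       my_dev_dma_error, my_dev);
 *	imx_dma_setup_single(channel, tx_buf_dma, len,
 *			     MY_DEV_TX_FIFO_ADDR, DMA_MODE_WRITE);
 *	imx_dma_enable(channel);
 *
 *	...
 *	imx_dma_disable(channel);
 *	imx_dma_free(channel);
 */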
494 | ||
495 | /** | |
496 | * imx_dma_disable - stop, finish i.MX DMA channel operatin | |
497 | * @channel: i.MX DMA channel number | |
498 | */ | |
499 | void imx_dma_disable(int channel) | |
500 | { | |
501 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | |
502 | unsigned long flags; | |
503 | ||
504 | pr_debug("imxdma%d: imx_dma_disable\n", channel); | |
505 | ||
506 | if (imx_dma_hw_chain(imxdma)) | |
507 | del_timer(&imxdma->watchdog); | |
508 | ||
509 | local_irq_save(flags); | |
510 | imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR); | |
511 | imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN, | |
512 | DMA_CCR(channel)); | |
513 | imx_dmav1_writel(1 << channel, DMA_DISR); | |
514 | imxdma->in_use = 0; | |
515 | local_irq_restore(flags); | |
516 | } | |
517 | EXPORT_SYMBOL(imx_dma_disable); | |
518 | ||
519 | static void imx_dma_watchdog(unsigned long chno) | |
520 | { | |
521 | struct imx_dma_channel *imxdma = &imx_dma_channels[chno]; | |
522 | ||
523 | imx_dmav1_writel(0, DMA_CCR(chno)); | |
524 | imxdma->in_use = 0; | |
525 | imxdma->sg = NULL; | |
526 | ||
527 | if (imxdma->err_handler) | |
528 | imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT); | |
529 | } | |
530 | ||
531 | static irqreturn_t dma_err_handler(int irq, void *dev_id) | |
532 | { | |
533 | int i, disr; | |
534 | struct imx_dma_channel *imxdma; | |
535 | unsigned int err_mask; | |
536 | int errcode; | |
537 | ||
538 | disr = imx_dmav1_readl(DMA_DISR); | |
539 | ||
540 | err_mask = imx_dmav1_readl(DMA_DBTOSR) | | |
541 | imx_dmav1_readl(DMA_DRTOSR) | | |
542 | imx_dmav1_readl(DMA_DSESR) | | |
543 | imx_dmav1_readl(DMA_DBOSR); | |
544 | ||
545 | if (!err_mask) | |
546 | return IRQ_HANDLED; | |
547 | ||
548 | imx_dmav1_writel(disr & err_mask, DMA_DISR); | |
549 | ||
550 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | |
551 | if (!(err_mask & (1 << i))) | |
552 | continue; | |
553 | imxdma = &imx_dma_channels[i]; | |
554 | errcode = 0; | |
555 | ||
556 | if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) { | |
557 | imx_dmav1_writel(1 << i, DMA_DBTOSR); | |
558 | errcode |= IMX_DMA_ERR_BURST; | |
559 | } | |
560 | if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) { | |
561 | imx_dmav1_writel(1 << i, DMA_DRTOSR); | |
562 | errcode |= IMX_DMA_ERR_REQUEST; | |
563 | } | |
564 | if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) { | |
565 | imx_dmav1_writel(1 << i, DMA_DSESR); | |
566 | errcode |= IMX_DMA_ERR_TRANSFER; | |
567 | } | |
568 | if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) { | |
569 | imx_dmav1_writel(1 << i, DMA_DBOSR); | |
570 | errcode |= IMX_DMA_ERR_BUFFER; | |
571 | } | |
572 | if (imxdma->name && imxdma->err_handler) { | |
573 | imxdma->err_handler(i, imxdma->data, errcode); | |
574 | continue; | |
575 | } | |
576 | ||
577 | imx_dma_channels[i].sg = NULL; | |
578 | ||
579 | printk(KERN_WARNING | |
580 | "DMA timeout on channel %d (%s) -%s%s%s%s\n", | |
581 | i, imxdma->name, | |
582 | errcode & IMX_DMA_ERR_BURST ? " burst" : "", | |
583 | errcode & IMX_DMA_ERR_REQUEST ? " request" : "", | |
584 | errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "", | |
585 | errcode & IMX_DMA_ERR_BUFFER ? " buffer" : ""); | |
586 | } | |
587 | return IRQ_HANDLED; | |
588 | } | |
589 | ||
590 | static void dma_irq_handle_channel(int chno) | |
591 | { | |
592 | struct imx_dma_channel *imxdma = &imx_dma_channels[chno]; | |
593 | ||
594 | if (!imxdma->name) { | |
595 | /* | |
596 | * IRQ for an unregistered DMA channel: | |
597 | * let's clear the interrupts and disable it. | |
598 | */ | |
599 | printk(KERN_WARNING | |
600 | "spurious IRQ for DMA channel %d\n", chno); | |
601 | return; | |
602 | } | |
603 | ||
604 | if (imxdma->sg) { | |
605 | u32 tmp; | |
606 | struct scatterlist *current_sg = imxdma->sg; | |
607 | imxdma->sg = sg_next(imxdma->sg); | |
608 | ||
609 | if (imxdma->sg) { | |
610 | imx_dma_sg_next(chno, imxdma->sg); | |
611 | ||
612 | tmp = imx_dmav1_readl(DMA_CCR(chno)); | |
613 | ||
614 | if (imx_dma_hw_chain(imxdma)) { | |
615 | /* FIXME: The timeout should probably be | |
616 | * configurable | |
617 | */ | |
618 | mod_timer(&imxdma->watchdog, | |
619 | jiffies + msecs_to_jiffies(500)); | |
620 | ||
621 | tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; | |
622 | imx_dmav1_writel(tmp, DMA_CCR(chno)); | |
623 | } else { | |
624 | imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno)); | |
625 | tmp |= CCR_CEN; | |
626 | } | |
627 | ||
628 | imx_dmav1_writel(tmp, DMA_CCR(chno)); | |
629 | ||
630 | if (imxdma->prog_handler) | |
631 | imxdma->prog_handler(chno, imxdma->data, | |
632 | current_sg); | |
633 | ||
634 | return; | |
635 | } | |
636 | ||
637 | if (imx_dma_hw_chain(imxdma)) { | |
638 | del_timer(&imxdma->watchdog); | |
639 | return; | |
640 | } | |
641 | } | |
642 | ||
643 | imx_dmav1_writel(0, DMA_CCR(chno)); | |
644 | imxdma->in_use = 0; | |
645 | if (imxdma->irq_handler) | |
646 | imxdma->irq_handler(chno, imxdma->data); | |
647 | } | |
648 | ||
649 | static irqreturn_t dma_irq_handler(int irq, void *dev_id) | |
650 | { | |
651 | int i, disr; | |
652 | ||
653 | if (cpu_is_mx21() || cpu_is_mx27()) | |
654 | dma_err_handler(irq, dev_id); | |
655 | ||
656 | disr = imx_dmav1_readl(DMA_DISR); | |
657 | ||
658 | pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", | |
659 | disr); | |
660 | ||
661 | imx_dmav1_writel(disr, DMA_DISR); | |
662 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | |
663 | if (disr & (1 << i)) | |
664 | dma_irq_handle_channel(i); | |
665 | } | |
666 | ||
667 | return IRQ_HANDLED; | |
668 | } | |
669 | ||
670 | /** | |
671 | * imx_dma_request - request/allocate specified channel number | |
672 | * @channel: i.MX DMA channel number | |
673 | * @name: the driver/caller own non-%NULL identification | |
674 | */ | |
675 | int imx_dma_request(int channel, const char *name) | |
676 | { | |
677 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | |
678 | unsigned long flags; | |
679 | int ret = 0; | |
680 | ||
681 | /* basic sanity checks */ | |
682 | if (!name) | |
683 | return -EINVAL; | |
684 | ||
685 | if (channel >= IMX_DMA_CHANNELS) { | |
686 | printk(KERN_CRIT "%s: called for non-existed channel %d\n", | |
687 | __func__, channel); | |
688 | return -EINVAL; | |
689 | } | |
690 | ||
691 | local_irq_save(flags); | |
692 | if (imxdma->name) { | |
693 | local_irq_restore(flags); | |
694 | return -EBUSY; | |
695 | } | |
696 | memset(imxdma, 0, sizeof(*imxdma)); | |
697 | imxdma->name = name; | |
698 | local_irq_restore(flags); /* request_irq() can block */ | |
699 | ||
700 | if (cpu_is_mx21() || cpu_is_mx27()) { | |
701 | ret = request_irq(MX2x_INT_DMACH0 + channel, | |
702 | dma_irq_handler, 0, "DMA", NULL); | |
703 | if (ret) { | |
704 | imxdma->name = NULL; | |
705 | pr_crit("Can't register IRQ %d for DMA channel %d\n", | |
706 | MX2x_INT_DMACH0 + channel, channel); | |
707 | return ret; | |
708 | } | |
709 | init_timer(&imxdma->watchdog); | |
710 | imxdma->watchdog.function = &imx_dma_watchdog; | |
711 | imxdma->watchdog.data = channel; | |
712 | } | |
713 | ||
714 | return ret; | |
715 | } | |
716 | EXPORT_SYMBOL(imx_dma_request); | |
717 | ||
718 | /** | |
719 | * imx_dma_free - release previously acquired channel | |
720 | * @channel: i.MX DMA channel number | |
721 | */ | |
722 | void imx_dma_free(int channel) | |
723 | { | |
724 | unsigned long flags; | |
725 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | |
726 | ||
727 | if (!imxdma->name) { | |
728 | printk(KERN_CRIT | |
729 | "%s: trying to free free channel %d\n", | |
730 | __func__, channel); | |
731 | return; | |
732 | } | |
733 | ||
734 | local_irq_save(flags); | |
735 | /* Disable interrupts */ | |
736 | imx_dma_disable(channel); | |
737 | imxdma->name = NULL; | |
738 | ||
739 | if (cpu_is_mx21() || cpu_is_mx27()) | |
740 | free_irq(MX2x_INT_DMACH0 + channel, NULL); | |
741 | ||
742 | local_irq_restore(flags); | |
743 | } | |
744 | EXPORT_SYMBOL(imx_dma_free); | |
745 | ||
746 | /** | |
747 | * imx_dma_request_by_prio - find and request some of free channels best | |
748 | * suiting requested priority | |
749 | * @channel: i.MX DMA channel number | |
750 | * @name: the driver/caller own non-%NULL identification | |
751 | * | |
752 | * This function tries to find a free channel in the specified priority group | |
753 | * if the priority cannot be achieved it tries to look for free channel | |
754 | * in the higher and then even lower priority groups. | |
755 | * | |
756 | * Return value: If there is no free channel to allocate, -%ENODEV is returned. | |
757 | * On successful allocation channel is returned. | |
758 | */ | |
759 | int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio) | |
760 | { | |
761 | int i; | |
762 | int best; | |
763 | ||
764 | switch (prio) { | |
765 | case (DMA_PRIO_HIGH): | |
766 | best = 8; | |
767 | break; | |
768 | case (DMA_PRIO_MEDIUM): | |
769 | best = 4; | |
770 | break; | |
771 | case (DMA_PRIO_LOW): | |
772 | default: | |
773 | best = 0; | |
774 | break; | |
775 | } | |
776 | ||
777 | for (i = best; i < IMX_DMA_CHANNELS; i++) | |
778 | if (!imx_dma_request(i, name)) | |
779 | return i; | |
780 | ||
781 | for (i = best - 1; i >= 0; i--) | |
782 | if (!imx_dma_request(i, name)) | |
783 | return i; | |
784 | ||
785 | printk(KERN_ERR "%s: no free DMA channel found\n", __func__); | |
786 | ||
787 | return -ENODEV; | |
788 | } | |
789 | EXPORT_SYMBOL(imx_dma_request_by_prio); | |
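
/*
 * Example (illustrative sketch, not part of the original driver): because
 * imx_dma_request_by_prio() falls back to the other priority groups on its
 * own, a caller only needs a single call and one error check; "my-dev" is a
 * hypothetical client name.
 *
 *	channel = imx_dma_request_by_prio("my-dev", DMA_PRIO_LOW);
 *	if (channel < 0) {
 *		dev_err(dev, "cannot get a DMA channel\n");
 *		return channel;
 *	}
 */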
790 | ||
791 | static int __init imx_dma_init(void) | |
792 | { | |
793 | int ret = 0; | |
794 | int i; | |
795 | ||
796 | if (cpu_is_mx1()) | |
797 | imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR); | |
798 | else if (cpu_is_mx21()) | |
799 | imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR); | |
800 | else if (cpu_is_mx27()) | |
801 | imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR); | |
802 | else | |
803 | return 0; | |
804 | ||
805 | dma_clk = clk_get(NULL, "dma"); | |
806 | if (IS_ERR(dma_clk)) | |
807 | return PTR_ERR(dma_clk); | |
808 | clk_enable(dma_clk); | |
809 | ||
810 | /* reset DMA module */ | |
811 | imx_dmav1_writel(DCR_DRST, DMA_DCR); | |
812 | ||
813 | if (cpu_is_mx1()) { | |
814 | ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL); | |
815 | if (ret) { | |
816 | pr_crit("Wow! Can't register IRQ for DMA\n"); | |
817 | return ret; | |
818 | } | |
819 | ||
820 | ret = request_irq(MX1_DMA_ERR, dma_err_handler, 0, "DMA", NULL); | |
821 | if (ret) { | |
822 | pr_crit("Wow! Can't register ERRIRQ for DMA\n"); | |
823 | free_irq(MX1_DMA_INT, NULL); | |
824 | return ret; | |
825 | } | |
826 | } | |
827 | ||
828 | /* enable DMA module */ | |
829 | imx_dmav1_writel(DCR_DEN, DMA_DCR); | |
830 | ||
831 | /* clear all interrupts */ | |
832 | imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); | |
833 | ||
834 | /* disable interrupts */ | |
835 | imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); | |
836 | ||
837 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | |
838 | imx_dma_channels[i].sg = NULL; | |
839 | imx_dma_channels[i].dma_num = i; | |
840 | } | |
841 | ||
842 | return ret; | |
843 | } | |
844 | ||
845 | arch_initcall(imx_dma_init); |