/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16

enum imxdma_prep_type {
        IMXDMA_DESC_MEMCPY,
        IMXDMA_DESC_INTERLEAVED,
        IMXDMA_DESC_SLAVE_SG,
        IMXDMA_DESC_CYCLIC,
};

struct imxdma_desc {
        struct list_head node;
        struct dma_async_tx_descriptor desc;
        enum dma_status status;
        dma_addr_t src;
        dma_addr_t dest;
        size_t len;
        unsigned int dmamode;
        enum imxdma_prep_type type;
        /* For memcpy and interleaved */
        unsigned int config_port;
        unsigned int config_mem;
        /* For interleaved transfers */
        unsigned int x;
        unsigned int y;
        unsigned int w;
        /* For slave sg and cyclic */
        struct scatterlist *sg;
        unsigned int sgcount;
};

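/*
 * Per-channel state. Software descriptors move between three lists:
 * ld_free (available for reuse), ld_queue (submitted, waiting for the
 * hardware channel) and ld_active (currently being transferred).
 */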
struct imxdma_channel {
        struct imxdma_engine *imxdma;
        unsigned int channel;
        unsigned int imxdma_channel;

        struct tasklet_struct dma_tasklet;
        struct list_head ld_free;
        struct list_head ld_queue;
        struct list_head ld_active;
        int descs_allocated;
        enum dma_slave_buswidth word_size;
        dma_addr_t per_address;
        u32 watermark_level;
        struct dma_chan chan;
        spinlock_t lock;
        struct dma_async_tx_descriptor desc;
        enum dma_status status;
        int dma_request;
        struct scatterlist *sg_list;
};

#define MAX_DMA_CHANNELS 8

struct imxdma_engine {
        struct device *dev;
        struct device_dma_parameters dma_parms;
        struct dma_device dma_device;
        struct imxdma_channel channel[MAX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
        struct imxdma_desc *desc;

        if (!list_empty(&imxdmac->ld_active)) {
                desc = list_first_entry(&imxdmac->ld_active,
                                        struct imxdma_desc, node);
                if (desc->type == IMXDMA_DESC_CYCLIC)
                        return true;
        }
        return false;
}

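/*
 * Callbacks invoked by the low-level dma-v1 code from interrupt context;
 * all completion handling is deferred to the channel tasklet.
 */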
static void imxdma_irq_handler(int channel, void *data)
{
        struct imxdma_channel *imxdmac = data;

        tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_err_handler(int channel, void *data, int error)
{
        struct imxdma_channel *imxdmac = data;

        tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_progression(int channel, void *data,
                               struct scatterlist *sg)
{
        struct imxdma_channel *imxdmac = data;

        tasklet_schedule(&imxdmac->dma_tasklet);
}

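/*
 * Program the underlying dma-v1 channel according to the descriptor type
 * and kick off the transfer.
 */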
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
        int ret;

        /* Configure and enable */
        switch (d->type) {
        case IMXDMA_DESC_MEMCPY:
                ret = imx_dma_config_channel(imxdmac->imxdma_channel,
                                             d->config_port, d->config_mem, 0, 0);
                if (ret < 0)
                        return ret;
                ret = imx_dma_setup_single(imxdmac->imxdma_channel, d->src,
                                           d->len, d->dest, d->dmamode);
                if (ret < 0)
                        return ret;
                break;
        case IMXDMA_DESC_CYCLIC:
                ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
                                                        imxdma_progression);
                if (ret < 0)
                        return ret;
                /*
                 * We fall through here since cyclic transfer is the same as
                 * slave_sg adding a progression handler and a specific sg
                 * configuration which is done in 'imxdma_prep_dma_cyclic'.
                 */
        case IMXDMA_DESC_SLAVE_SG:
                if (d->dmamode == DMA_MODE_READ)
                        ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
                                               d->sgcount, d->len, d->src, d->dmamode);
                else
                        ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
                                               d->sgcount, d->len, d->dest, d->dmamode);
                if (ret < 0)
                        return ret;
                break;
        default:
                return -EINVAL;
        }
        imx_dma_enable(imxdmac->imxdma_channel);
        return 0;
}

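/*
 * Channel tasklet: completes the descriptor at the head of ld_active,
 * runs its callback, and starts the next queued descriptor if any.
 */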
static void imxdma_tasklet(unsigned long data)
{
        struct imxdma_channel *imxdmac = (void *)data;
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;

        spin_lock(&imxdmac->lock);

        if (list_empty(&imxdmac->ld_active)) {
                /* Someone might have called terminate all */
                goto out;
        }
        desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

        if (desc->desc.callback)
                desc->desc.callback(desc->desc.callback_param);

        dma_cookie_complete(&desc->desc);

        /* If we are dealing with a cyclic descriptor keep it on ld_active */
        if (imxdma_chan_is_doing_cyclic(imxdmac))
                goto out;

        list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

        if (!list_empty(&imxdmac->ld_queue)) {
                desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
                                        node);
                list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
                if (imxdma_xfer_desc(desc) < 0)
                        dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
                                 __func__, imxdmac->channel);
        }
out:
        spin_unlock(&imxdmac->lock);
}

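/*
 * dmaengine device_control hook: handles DMA_TERMINATE_ALL and
 * DMA_SLAVE_CONFIG for this channel.
 */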
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                          unsigned long arg)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct dma_slave_config *dmaengine_cfg = (void *)arg;
        int ret;
        unsigned long flags;
        unsigned int mode = 0;

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                imx_dma_disable(imxdmac->imxdma_channel);

                spin_lock_irqsave(&imxdmac->lock, flags);
                list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
                list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
                spin_unlock_irqrestore(&imxdmac->lock, flags);
                return 0;
        case DMA_SLAVE_CONFIG:
                if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
                        imxdmac->per_address = dmaengine_cfg->src_addr;
                        imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
                        imxdmac->word_size = dmaengine_cfg->src_addr_width;
                } else {
                        imxdmac->per_address = dmaengine_cfg->dst_addr;
                        imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
                        imxdmac->word_size = dmaengine_cfg->dst_addr_width;
                }

                switch (imxdmac->word_size) {
                case DMA_SLAVE_BUSWIDTH_1_BYTE:
                        mode = IMX_DMA_MEMSIZE_8;
                        break;
                case DMA_SLAVE_BUSWIDTH_2_BYTES:
                        mode = IMX_DMA_MEMSIZE_16;
                        break;
                default:
                case DMA_SLAVE_BUSWIDTH_4_BYTES:
                        mode = IMX_DMA_MEMSIZE_32;
                        break;
                }
                ret = imx_dma_config_channel(imxdmac->imxdma_channel,
                                             mode | IMX_DMA_TYPE_FIFO,
                                             IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
                                             imxdmac->dma_request, 1);

                if (ret)
                        return ret;

                imx_dma_config_burstlen(imxdmac->imxdma_channel,
                                        imxdmac->watermark_level * imxdmac->word_size);

                return 0;
        default:
                return -ENOSYS;
        }

        return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        struct dma_tx_state *txstate)
{
        return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&imxdmac->lock, flags);

        /* Move the prepared descriptor from the free list to the queue */
        list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
        cookie = dma_cookie_assign(tx);

        spin_unlock_irqrestore(&imxdmac->lock, flags);

        return cookie;
}

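/* Pre-allocate a fixed pool of software descriptors for the channel. */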
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imx_dma_data *data = chan->private;

        if (data != NULL)
                imxdmac->dma_request = data->dma_request;

        while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
                struct imxdma_desc *desc;

                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                if (!desc)
                        break;
                __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
                dma_async_tx_descriptor_init(&desc->desc, chan);
                desc->desc.tx_submit = imxdma_tx_submit;
                /* txd.flags will be overwritten in prep funcs */
                desc->desc.flags = DMA_CTRL_ACK;
                desc->status = DMA_SUCCESS;

                list_add_tail(&desc->node, &imxdmac->ld_free);
                imxdmac->descs_allocated++;
        }

        if (!imxdmac->descs_allocated)
                return -ENOMEM;

        return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_desc *desc, *_desc;
        unsigned long flags;

        spin_lock_irqsave(&imxdmac->lock, flags);

        imx_dma_disable(imxdmac->imxdma_channel);
        list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
        list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

        spin_unlock_irqrestore(&imxdmac->lock, flags);

        list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
                kfree(desc);
                imxdmac->descs_allocated--;
        }
        INIT_LIST_HEAD(&imxdmac->ld_free);

        if (imxdmac->sg_list) {
                kfree(imxdmac->sg_list);
                imxdmac->sg_list = NULL;
        }
}

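/*
 * Prepare a slave scatter/gather transfer on a free descriptor; the
 * scatterlist must respect the bus width configured via DMA_SLAVE_CONFIG.
 */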
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct scatterlist *sg;
        int i, dma_length = 0;
        struct imxdma_desc *desc;

        if (list_empty(&imxdmac->ld_free) ||
            imxdma_chan_is_doing_cyclic(imxdmac))
                return NULL;

        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

        for_each_sg(sgl, sg, sg_len, i) {
                dma_length += sg->length;
        }

        switch (imxdmac->word_size) {
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                if (sgl->length & 3 || sgl->dma_address & 3)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                if (sgl->length & 1 || sgl->dma_address & 1)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                break;
        default:
                return NULL;
        }

        desc->type = IMXDMA_DESC_SLAVE_SG;
        desc->sg = sgl;
        desc->sgcount = sg_len;
        desc->len = dma_length;
        if (direction == DMA_DEV_TO_MEM) {
                desc->dmamode = DMA_MODE_READ;
                desc->src = imxdmac->per_address;
        } else {
                desc->dmamode = DMA_MODE_WRITE;
                desc->dest = imxdmac->per_address;
        }
        desc->desc.callback = NULL;
        desc->desc.callback_param = NULL;

        return &desc->desc;
}

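/*
 * Prepare a cyclic (circular buffer) transfer: a scatterlist with one entry
 * per period is built, and a terminating entry chains back to the first one
 * so the hardware loops over the buffer until the channel is stopped.
 */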
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                void *context)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;
        int i;
        unsigned int periods = buf_len / period_len;

        dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
                        __func__, imxdmac->channel, buf_len, period_len);

        if (list_empty(&imxdmac->ld_free) ||
            imxdma_chan_is_doing_cyclic(imxdmac))
                return NULL;

        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

        if (imxdmac->sg_list)
                kfree(imxdmac->sg_list);

        imxdmac->sg_list = kcalloc(periods + 1,
                        sizeof(struct scatterlist), GFP_KERNEL);
        if (!imxdmac->sg_list)
                return NULL;

        sg_init_table(imxdmac->sg_list, periods);

        for (i = 0; i < periods; i++) {
                imxdmac->sg_list[i].page_link = 0;
                imxdmac->sg_list[i].offset = 0;
                imxdmac->sg_list[i].dma_address = dma_addr;
                imxdmac->sg_list[i].length = period_len;
                dma_addr += period_len;
        }

        /* close the loop */
        imxdmac->sg_list[periods].offset = 0;
        imxdmac->sg_list[periods].length = 0;
        imxdmac->sg_list[periods].page_link =
                ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

        desc->type = IMXDMA_DESC_CYCLIC;
        desc->sg = imxdmac->sg_list;
        desc->sgcount = periods;
        desc->len = IMX_DMA_LENGTH_LOOP;
        if (direction == DMA_DEV_TO_MEM) {
                desc->dmamode = DMA_MODE_READ;
                desc->src = imxdmac->per_address;
        } else {
                desc->dmamode = DMA_MODE_WRITE;
                desc->dest = imxdmac->per_address;
        }
        desc->desc.callback = NULL;
        desc->desc.callback_param = NULL;

        return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest,
        dma_addr_t src, size_t len, unsigned long flags)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;

        dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%zu\n",
                        __func__, imxdmac->channel, src, dest, len);

        if (list_empty(&imxdmac->ld_free) ||
            imxdma_chan_is_doing_cyclic(imxdmac))
                return NULL;

        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

        desc->type = IMXDMA_DESC_MEMCPY;
        desc->src = src;
        desc->dest = dest;
        desc->len = len;
        desc->dmamode = DMA_MODE_WRITE;
        desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
        desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
        desc->desc.callback = NULL;
        desc->desc.callback_param = NULL;

        return &desc->desc;
}

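/* Start the first queued descriptor if the channel is currently idle. */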
static void imxdma_issue_pending(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&imxdmac->lock, flags);
        if (list_empty(&imxdmac->ld_active) &&
            !list_empty(&imxdmac->ld_queue)) {
                desc = list_first_entry(&imxdmac->ld_queue,
                                        struct imxdma_desc, node);

                if (imxdma_xfer_desc(desc) < 0) {
                        dev_warn(imxdma->dev,
                                 "%s: channel: %d couldn't issue DMA xfer\n",
                                 __func__, imxdmac->channel);
                } else {
                        list_move_tail(imxdmac->ld_queue.next,
                                       &imxdmac->ld_active);
                }
        }
        spin_unlock_irqrestore(&imxdmac->lock, flags);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
        struct imxdma_engine *imxdma;
        int ret, i;

        imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
        if (!imxdma)
                return -ENOMEM;

        INIT_LIST_HEAD(&imxdma->dma_device.channels);

        dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
        dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
        dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

        /* Initialize channel parameters */
        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                struct imxdma_channel *imxdmac = &imxdma->channel[i];

                imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
                                                                  DMA_PRIO_MEDIUM);
                /* check the channel just requested, not the index field */
                if ((int)imxdmac->imxdma_channel < 0) {
                        ret = -ENODEV;
                        goto err_init;
                }

                imx_dma_setup_handlers(imxdmac->imxdma_channel,
                                       imxdma_irq_handler, imxdma_err_handler, imxdmac);

                imxdmac->imxdma = imxdma;
                spin_lock_init(&imxdmac->lock);

                INIT_LIST_HEAD(&imxdmac->ld_queue);
                INIT_LIST_HEAD(&imxdmac->ld_free);
                INIT_LIST_HEAD(&imxdmac->ld_active);

                tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
                             (unsigned long)imxdmac);
                imxdmac->chan.device = &imxdma->dma_device;
                dma_cookie_init(&imxdmac->chan);
                imxdmac->channel = i;

                /* Add the channel to the DMAC list */
                list_add_tail(&imxdmac->chan.device_node,
                              &imxdma->dma_device.channels);
        }

        imxdma->dev = &pdev->dev;
        imxdma->dma_device.dev = &pdev->dev;

        imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
        imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
        imxdma->dma_device.device_tx_status = imxdma_tx_status;
        imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
        imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
        imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
        imxdma->dma_device.device_control = imxdma_control;
        imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

        platform_set_drvdata(pdev, imxdma);

        imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
        imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
        dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

        ret = dma_async_device_register(&imxdma->dma_device);
        if (ret) {
                dev_err(&pdev->dev, "unable to register\n");
                goto err_init;
        }

        return 0;

err_init:
        while (--i >= 0) {
                struct imxdma_channel *imxdmac = &imxdma->channel[i];
                imx_dma_free(imxdmac->imxdma_channel);
        }

        kfree(imxdma);
        return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
        struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
        int i;

        dma_async_device_unregister(&imxdma->dma_device);

        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                struct imxdma_channel *imxdmac = &imxdma->channel[i];

                imx_dma_free(imxdmac->imxdma_channel);
        }

        kfree(imxdma);

        return 0;
}

static struct platform_driver imxdma_driver = {
        .driver         = {
                .name   = "imx-dma",
        },
        .remove         = __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
        return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");