/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

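/*
 * Driver-private state: omap_dmadev wraps the dmaengine dma_device plus the
 * tasklet that schedules pending channels; omap_chan is one virtual channel
 * (built on virt-dma) bound to a legacy sDMA logical channel and request
 * line; omap_desc describes a transfer as a list of omap_sg segments, each
 * programmed as EN elements per frame and FN frames.
 */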
struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	struct omap_system_dma_plat_info *plat;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

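/*
 * Program one scatterlist segment: write its start address to the memory
 * side of the channel (destination for DEV_TO_MEM, source for MEM_TO_DEV),
 * clear the element/frame indexes, set the element and frame counts, and
 * start the channel.
 */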
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM) {
		c->plat->dma_write(sg->addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(0, CDFI, c->dma_ch);
	} else {
		c->plat->dma_write(sg->addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(0, CSFI, c->dma_ch);
	}

	c->plat->dma_write(sg->en, CEN, c->dma_ch);
	c->plat->dma_write(sg->fn, CFN, c->dma_ch);

	omap_start_dma(c->dma_ch);
}

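/*
 * Take the next descriptor off the virtual channel and set up everything
 * that is common to all of its segments: constant addressing and the device
 * address on the device side, post-increment addressing on the memory side,
 * the element size, and the hardware synchronisation mode (CCR/CCR2 on
 * OMAP1, the CCR sync fields keyed by the request line on OMAP2+), then
 * start the first segment.
 */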
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	uint32_t val;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM) {
		if (dma_omap1()) {
			val = c->plat->dma_read(CSDP, c->dma_ch);
			val &= ~(0x1f << 9 | 0x1f << 2);
			val |= OMAP_DMA_PORT_EMIFF << 9;
			val |= d->periph_port << 2;
			c->plat->dma_write(val, CSDP, c->dma_ch);
		}

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(0x03 << 14 | 0x03 << 12);
		val |= OMAP_DMA_AMODE_POST_INC << 14;
		val |= OMAP_DMA_AMODE_CONSTANT << 12;
		c->plat->dma_write(val, CCR, c->dma_ch);

		c->plat->dma_write(d->dev_addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(d->fi, CSFI, c->dma_ch);
	} else {
		if (dma_omap1()) {
			val = c->plat->dma_read(CSDP, c->dma_ch);
			val &= ~(0x1f << 9 | 0x1f << 2);
			val |= d->periph_port << 9;
			val |= OMAP_DMA_PORT_EMIFF << 2;
			c->plat->dma_write(val, CSDP, c->dma_ch);
		}

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(0x03 << 12 | 0x03 << 14);
		val |= OMAP_DMA_AMODE_CONSTANT << 14;
		val |= OMAP_DMA_AMODE_POST_INC << 12;
		c->plat->dma_write(val, CCR, c->dma_ch);

		c->plat->dma_write(d->dev_addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(d->fi, CDFI, c->dma_ch);
	}

	val = c->plat->dma_read(CSDP, c->dma_ch);
	val &= ~0x03;
	val |= d->es;
	c->plat->dma_write(val, CSDP, c->dma_ch);

	if (dma_omap1()) {
		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(1 << 5);
		if (d->sync_mode == OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		c->plat->dma_write(val, CCR, c->dma_ch);

		val = c->plat->dma_read(CCR2, c->dma_ch);
		val &= ~(1 << 2);
		if (d->sync_mode == OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 2;
		c->plat->dma_write(val, CCR2, c->dma_ch);
	} else if (c->dma_sig) {
		val = c->plat->dma_read(CCR, c->dma_ch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~(1 << 24 | 1 << 23 | 3 << 19 | 1 << 18 | 1 << 5 | 0x1f);
		val |= (c->dma_sig & ~0x1f) << 14;
		val |= c->dma_sig & 0x1f;

		if (d->sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;

		if (d->sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;

		switch (d->sync_type) {
		case OMAP_DMA_DST_SYNC_PREFETCH:/* dest synch */
			val |= 1 << 23;		/* Prefetch */
			break;
		case 0:
			break;
		default:
			val |= 1 << 24;		/* source synch */
			break;
		}
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	omap_dma_start_sg(c, d, 0);
}

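/*
 * Interrupt callback from the legacy sDMA core.  For a scatter-gather
 * transfer this advances to the next segment, or completes the cookie and
 * starts the next queued descriptor; for a cyclic transfer it only signals
 * the per-period callback.
 */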
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

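/*
 * Channel resources map 1:1 onto the legacy omap_request_dma()/omap_free_dma()
 * API: each virtual channel holds its logical channel and request line for
 * the lifetime of the allocation.
 */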
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

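/*
 * Residue reporting: a descriptor that is still queued reports its full
 * size; the descriptor currently on the hardware reports the bytes left
 * between the current source/destination position and the end of the
 * transfer.
 */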
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio, and in that case the DMA
		 * needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

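/*
 * Slave scatter-gather transfers: the dmaengine bus width selects the sDMA
 * element size (ES), the maxburst value becomes the number of elements per
 * frame (EN), and each scatterlist entry becomes one omap_sg with FN frames.
 * Frame synchronisation is used, so the peripheral request advances the
 * transfer one frame at a time.
 */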
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
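	/*
	 * Illustrative numbers only (not taken from any particular client):
	 * with 32-bit elements (4 bytes) and a maxburst of 16, frame_bytes is
	 * 64, so a 4096-byte scatterlist entry is programmed as EN = 16
	 * elements per frame and FN = 4096 / 64 = 64 frames.
	 */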
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

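/*
 * Cyclic (audio) transfers use a single omap_sg entry: EN covers one period
 * and FN is the number of periods in the buffer.  The logical channel is
 * linked to itself so the hardware loops over the buffer, and the frame
 * interrupt provides the per-period callback.
 */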
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	if (burst)
		d->sync_mode = OMAP_DMA_SYNC_PACKET;
	else
		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;
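	/*
	 * Illustrative numbers only: an 8192-byte ring buffer with 2048-byte
	 * periods and 32-bit samples gives en = 2048 / 4 = 512 elements per
	 * period and fn = 8192 / 2048 = 4 periods.
	 */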

	if (!c->cyclic) {
		c->cyclic = true;
		omap_dma_link_lch(c->dma_ch, c->dma_ch);

		if (flags & DMA_PREP_INTERRUPT)
			omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);

		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
	}

	if (dma_omap2plus()) {
		uint32_t val;

		val = c->plat->dma_read(CSDP, c->dma_ch);
		val |= 0x03 << 7; /* src burst mode 16 */
		val |= 0x03 << 14; /* dst burst mode 16 */
		c->plat->dma_write(val, CSDP, c->dma_ch);
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

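/*
 * Terminating a channel takes it off the scheduler's pending list, stops the
 * hardware (unless it is already paused) and frees every queued descriptor;
 * a cyclic channel is also unlinked from itself.
 */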
static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_stop_dma() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_stop_dma(c->dma_ch);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_stop_dma(c->dma_ch);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_start_dma(c->dma_ch);
		c->paused = false;
	}

	return 0;
}

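/*
 * device_control multiplexes the dmaengine control operations onto the
 * helpers above; anything other than config, terminate, pause or resume is
 * rejected with -ENXIO.
 */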
static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->plat = od->plat;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

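/*
 * Probe obtains the legacy register accessors from the platform code
 * (deferring until they are available), creates a virtual channel for each
 * of the 127 request signals, registers the dmaengine device and, when
 * booted from a device tree, registers the node as a DMA controller using
 * the generic filter-based translation.
 */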
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

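/*
 * omap_dma_filter_fn() is exported so that legacy (non-DT) clients can pick
 * the virtual channel carrying a given sDMA request line.  A minimal usage
 * sketch, with a made-up request line number (the real value comes from the
 * peripheral's DMA resources):
 *
 *	dma_cap_mask_t mask;
 *	unsigned sig = 42;		// hypothetical request line
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 *	if (chan)
 *		... configure with dmaengine_slave_config() and prepare ...
 */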
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");