/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set up the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

struct edma_desc {
	struct virt_dma_desc	vdesc;
	struct list_head	node;
	int			cyclic;
	int			absync;
	int			pset_nr;
	int			processed;
	struct edmacc_param	pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan	vchan;
	struct list_head	node;
	struct edma_desc	*edesc;
	struct edma_cc		*ecc;
	int			ch_num;
	bool			alloced;
	int			slot[EDMA_MAX_SLOTS];
	int			missed;
	struct dma_slave_config	cfg;
};

struct edma_cc {
	int			ctlr;
	struct dma_device	dma_slave;
	struct edma_chan	slave_chans[EDMA_CHANS];
	int			num_slave_chans;
	int			dummy_slot;
};

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	/* If we have processed all psets or have not yet started */
	if (!echan->edesc ||
	    echan->edesc->pset_nr == echan->edesc->processed) {
		/* Get next vdesc */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc) {
			echan->edesc = NULL;
			return;
		}
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many PaRAM sets are left to process */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j]);
		dev_dbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			" chnum\t%d\n"
			" slot\t%d\n"
			" opt\t%08x\n"
			" src\t%08x\n"
			" dst\t%08x\n"
			" abcnt\t%08x\n"
			" ccnt\t%08x\n"
			" bidx\t%08x\n"
			" cidx\t%08x\n"
			" lkrld\t%08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].opt,
			edesc->pset[j].src,
			edesc->pset[j].dst,
			edesc->pset[j].a_b_cnt,
			edesc->pset[j].ccnt,
			edesc->pset[j].src_dst_bidx,
			edesc->pset[j].src_dst_cidx,
			edesc->pset[j].link_bcntrld);
		/* Link to the next slot if not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in the SG list, link to the dummy slot;
	 * all future events are then absorbed, which is fine because we
	 * are done. For cyclic transfers, link back to the first period's
	 * slot instead so the transfer wraps around.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(echan->slot[nslots-1], echan->slot[1]);
		else
			edma_link(echan->slot[nslots-1],
				  echan->ecc->dummy_slot);
	}

	if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
		edma_start(echan->ch_num);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan->ch_num);
	}

	/*
	 * This can happen due to setup times between intermediate
	 * transfers in long SG lists which have to be broken up into
	 * transfers of at most MAX_NR_SG sets.
	 */
	if (echan->missed) {
		dev_dbg(dev, "missed event in execute detected\n");
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	}
}

static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_stop() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		echan->edesc = NULL;
		edma_stop(echan->ch_num);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
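
/*
 * Illustration (not part of this driver): a client typically fills in a
 * struct dma_slave_config and passes it through the dmaengine core, which
 * routes it here via the DMA_SLAVE_CONFIG control command. The address
 * and widths below are hypothetical:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= 0x01d0c000,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */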

static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: Width of the device's data port
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
	enum dma_slave_buswidth dev_width, unsigned int dma_length,
	enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: bcntrld is used only in A-sync transfers, and
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In that case, bcnt for the first frame is the
		 * remainder computed below; every successive frame then
		 * uses bcnt = SZ_64K - 1, which is assured by setting
		 * bcntrld = 0xffff at the end of this function.
		 */
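		/*
		 * Worked example (hypothetical numbers): with acnt = 4 and
		 * dma_length = 1 MiB, dma_length / acnt = 262144; dividing
		 * by (SZ_64K - 1) gives ccnt = 4 with remainder bcnt = 4,
		 * and since bcnt != 0, ccnt is bumped to 5: one short
		 * 4-word frame plus four full 65535-word frames, 262144
		 * words in total.
		 */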
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
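		/*
		 * For instance (hypothetical numbers): acnt = 4 and
		 * burst = 8 give 32-byte frames, so dma_length = 64 KiB
		 * yields ccnt = 2048 frames, comfortably under the
		 * SZ_64K - 1 limit.
		 */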
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		pset->opt |= SYNCDIM;

	pset->src = src_addr;
	pset->dst = dst_addr;

	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	pset->a_b_cnt = bcnt << 16 | acnt;
	pset->ccnt = ccnt;
	/*
	 * bcntrld auto-reload is only needed in the A-sync case, where
	 * the reload value is guaranteed to be SZ_64K - 1. The link
	 * field is initially set to NULL and is populated later by
	 * edma_execute().
	 */
	pset->link_bcntrld = 0xffffffff;
	return absync;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = sg_len;

	/* Allocate PaRAM slots, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "Failed to allocate slot\n");
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;

		/*
		 * If this is the last set in the current batch of MAX_NR_SG
		 * transactions, enable interrupts so that the next batch
		 * can be processed.
		 */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}
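
	/*
	 * One PaRAM slot per period, plus one extra: the last set is a
	 * copy of the first (see the loop below) so that edma_execute()
	 * can link it back and let the transfer cycle indefinitely.
	 */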
	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;

	dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
	dev_dbg(dev, "%s: period_len=%zu\n", __func__, period_len);
	dev_dbg(dev, "%s: buf_len=%zu\n", __func__, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "Failed to allocate slot\n");
				return NULL;
			}
		}
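
		/*
		 * The last set duplicates the first; once every period is
		 * programmed, edma_execute() links it back to slot[1] so
		 * the hardware keeps cycling through the periods without
		 * CPU intervention.
		 */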
		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_dbg(dev,
			"\n pset[%d]:\n"
			" chnum\t%d\n"
			" slot\t%d\n"
			" opt\t%08x\n"
			" src\t%08x\n"
			" dst\t%08x\n"
			" abcnt\t%08x\n"
			" ccnt\t%08x\n"
			" bidx\t%08x\n"
			" cidx\t%08x\n"
			" lkrld\t%08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].opt,
			edesc->pset[i].src,
			edesc->pset[i].dst,
			edesc->pset[i].a_b_cnt,
			edesc->pset[i].ccnt,
			edesc->pset[i].src_dst_bidx,
			edesc->pset[i].src_dst_cidx,
			edesc->pset[i].link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable interrupts for every period because callback
		 * has to be called for every period.
		 */
		edesc->pset[i].opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
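
/*
 * Completion/error callback registered with the private EDMA API via
 * edma_alloc_channel(); runs in interrupt context.
 */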
static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	unsigned long flags;
	struct edmacc_param p;

	edesc = echan->edesc;

	/* Pause the channel for non-cyclic */
	if (!edesc || !edesc->cyclic)
		edma_pause(echan->ch_num);

	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		if (edesc) {
			if (edesc->cyclic) {
				vchan_cyclic_callback(&edesc->vdesc);
			} else if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
				edma_execute(echan);
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
				edma_execute(echan);
			}
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	case EDMA_DMA_CC_ERROR:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later based on missed flag which will be sure
		 * to happen as:
		 * (1) we finished transmitting an intermediate slot and
		 * edma_execute is coming up.
		 * (2) or we finished the current transfer and issue will
		 * call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	default:
		break;
	}
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel for %u:%u\n",
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
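
/*
 * Total number of bytes described by a descriptor's PaRAM sets; used by
 * edma_tx_status() as a coarse residue estimate.
 */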
static size_t edma_desc_size(struct edma_desc *edesc)
{
	int i;
	size_t size;

	if (edesc->absync)
		for (size = i = 0; i < edesc->pset_nr; i++)
			size += (edesc->pset[i].a_b_cnt & 0xffff) *
				(edesc->pset[i].a_b_cnt >> 16) *
				edesc->pset[i].ccnt;
	else
		size = (edesc->pset[0].a_b_cnt & 0xffff) *
			(edesc->pset[0].a_b_cnt >> 16) +
			(edesc->pset[0].a_b_cnt & 0xffff) *
			(SZ_64K - 1) * edesc->pset[0].ccnt;

	return size;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	vdesc = vchan_find_desc(&echan->vchan, cookie);
	if (vdesc) {
		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		struct edma_desc *edesc = echan->edesc;
		txstate->residue = edma_desc_size(edesc);
	}
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}

static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name = "edma-dma-engine",
		.owner = THIS_MODULE,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
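
/*
 * Illustration (not part of this driver): a client claims a specific EDMA
 * channel through the generic dmaengine filter interface. The controller
 * and channel numbers are hypothetical.
 *
 *	dma_cap_mask_t mask;
 *	unsigned ch = EDMA_CTLR_CHAN(0, 12);
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch);
 */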

static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
	}

	if (ret == 0 && EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
		}
	}

out:
	return ret;
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");