/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/*
 * Max of 20 segments per channel to conserve PaRAM slots.
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will fail.
 * Today davinci-pcm is the only user of this driver and it requires
 * at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	int				absync;
	int				pset_nr;
	int				processed;
	struct edmacc_param		pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	int				ctlr;
	struct dma_device		dma_slave;
	struct edma_chan		slave_chans[EDMA_CHANS];
	int				num_slave_chans;
	int				dummy_slot;
};

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
	*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	/* If we have processed all psets, or have not started yet, get the next descriptor */
	if (!echan->edesc ||
	    echan->edesc->pset_nr == echan->edesc->processed) {
		/* Get next vdesc */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc) {
			echan->edesc = NULL;
			return;
		}
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many PaRAM sets are left to process */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
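	/*
	 * For example, with MAX_NR_SG = 20 a 50-element SG list is
	 * dispatched in chunks of 20, 20 and 10 PaRAM sets.  Each chunk
	 * reuses the same echan->slot[] entries; the intermediate
	 * completion interrupt requested in edma_prep_slave_sg() brings
	 * us back here to program the next chunk.
	 */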

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j]);
		dev_dbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			" chnum\t%d\n"
			" slot\t%d\n"
			" opt\t%08x\n"
			" src\t%08x\n"
			" dst\t%08x\n"
			" abcnt\t%08x\n"
			" ccnt\t%08x\n"
			" bidx\t%08x\n"
			" cidx\t%08x\n"
			" lkrld\t%08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].opt,
			edesc->pset[j].src,
			edesc->pset[j].dst,
			edesc->pset[j].a_b_cnt,
			edesc->pset[j].ccnt,
			edesc->pset[j].src_dst_bidx,
			edesc->pset[j].src_dst_cidx,
			edesc->pset[j].link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in the SG-list transactions, then set
	 * up a link to the dummy slot.  This results in all future events
	 * being absorbed, which is fine because we are done.
	 */
	if (edesc->processed == edesc->pset_nr)
		edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);

	edma_resume(echan->ch_num);

	if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
		edma_start(echan->ch_num);
	}

	/*
	 * This happens due to setup times between intermediate transfers
	 * in long SG lists which have to be broken up into transfers of
	 * at most MAX_NR_SG sets.
	 */
	if (echan->missed) {
		dev_dbg(dev, "missed event in execute detected\n");
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	}
}

static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_stop() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		echan->edesc = NULL;
		edma_stop(echan->ch_num);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}

static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: Width of the device's data bus (bytes per element)
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
	enum dma_slave_buswidth dev_width, unsigned int dma_length,
	enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: bcntrld is used only in the A-sync transfer,
		 * and only applies for sg_dma_len(sg) >= SZ_64K.
		 * In that case the approach adopted is: bcnt for the
		 * first frame will be the remainder below, and for
		 * every successive frame bcnt will be SZ_64K - 1. This
		 * is assured because bcntrld is set to 0xffff at the
		 * end of this function.
		 */
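		/*
		 * Worked example (values assumed for illustration):
		 * dev_width = 4 bytes and dma_length = 400000 bytes give
		 * 100000 elements; ccnt = 100000 / 65535 = 1 and
		 * bcnt = 100000 - 65535 = 34465, and since bcnt != 0,
		 * ccnt becomes 2.  The first frame moves 34465 elements
		 * and bcntrld reloads bcnt to 65535 for the remaining
		 * frame: 34465 + 65535 = 100000 elements in total.
		 */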
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
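		/*
		 * Worked example (values assumed for illustration):
		 * dev_width = 4 bytes and burst = 8 give 32-byte frames,
		 * so a 64000-byte segment needs ccnt = 64000 / 32 = 2000
		 * frames, well under the SZ_64K - 1 frame limit.  The
		 * largest segment one pset can cover is therefore
		 * 4 * 8 * 65535 bytes (roughly 2 MiB).
		 */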
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		pset->opt |= SYNCDIM;

	pset->src = src_addr;
	pset->dst = dst_addr;

	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	pset->a_b_cnt = bcnt << 16 | acnt;
	pset->ccnt = ccnt;
	/*
	 * The bcntrld auto-reload is only required in the A-sync case,
	 * and there a reload value of SZ_64K - 1 is all that is needed.
	 * The link field is initially set to NULL and will be populated
	 * later by edma_execute().
	 */
	pset->link_bcntrld = 0xffffffff;
	return absync;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = sg_len;

	/* Allocate PaRAM slots, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				dev_err(dev, "Failed to allocate slot\n");
				kfree(edesc);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;

		/*
		 * If this is the last set in the current chunk of SG
		 * transactions, enable the interrupt so that the next
		 * chunk gets processed.
		 */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	unsigned long flags;
	struct edmacc_param p;

	/* Pause the channel */
	edma_pause(echan->ch_num);

	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		edesc = echan->edesc;
		if (edesc) {
			if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
			}

			edma_execute(echan);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	case EDMA_DMA_CC_ERROR:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later, based on the missed flag; this is sure
		 * to happen because either:
		 * (1) we finished transmitting an intermediate slot and
		 *     edma_execute() is coming up, or
		 * (2) we finished the current transfer and the issue
		 *     path will call edma_execute().
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	default:
		break;
	}
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_info(dev, "allocated channel for %u:%u\n",
		 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_info(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

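/*
 * Residue estimate for a descriptor: for AB-synced transfers this sums
 * acnt * bcnt * ccnt over every pset; in the A-synced case a single
 * pset covers the whole transfer, so the estimate is acnt * bcnt for
 * the first frame plus acnt * (SZ_64K - 1) for each reloaded frame.
 */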
static size_t edma_desc_size(struct edma_desc *edesc)
{
	int i;
	size_t size;

	if (edesc->absync)
		for (size = i = 0; i < edesc->pset_nr; i++)
			size += (edesc->pset[i].a_b_cnt & 0xffff) *
				(edesc->pset[i].a_b_cnt >> 16) *
				 edesc->pset[i].ccnt;
	else
		size = (edesc->pset[0].a_b_cnt & 0xffff) *
			(edesc->pset[0].a_b_cnt >> 16) +
			(edesc->pset[0].a_b_cnt & 0xffff) *
			(SZ_64K - 1) * edesc->pset[0].ccnt;

	return size;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	vdesc = vchan_find_desc(&echan->vchan, cookie);
	if (vdesc) {
		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		struct edma_desc *edesc = echan->edesc;
		txstate->residue = edma_desc_size(edesc);
	}
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}

static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma-dma-engine",
		.owner	= THIS_MODULE,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
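
/*
 * Sketch of how a client driver would use the filter function to claim
 * a specific EDMA channel (the channel number 12 below is only an
 * illustrative assumption, not something defined by this driver):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int req = EDMA_CTLR_CHAN(0, 12);
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &req);
 */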

static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
};

static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
		pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
		pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	if (EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
			goto out;
		}
		pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
		pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

out:
	return ret;
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");