/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the
 * file called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
 * any channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
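 * (As a worked example of that rule, a peripheral with a 16-word FIFO
 * would be given a source burst of 8 words and a destination burst of
 * 16 words; the figures here are illustrative, not taken from a TRM.)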
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * Only DMAC flow control is implemented
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/amba/bus.h>
#include <linux/dmaengine.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/hardware/pl080.h>
#include <asm/dma.h>
#include <asm/mach/dma.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>

#define DRIVER_NAME	"pl08xdmac"

/**
 * struct vendor_data - vendor-specific config parameters
 * for PL08x derivatives
 * @name: the name of this specific variant
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters
 * or not.
 */
struct vendor_data {
	char *name;
	u8 channels;
	bool dualmaster;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.
 */
struct lli {
	dma_addr_t src;
	dma_addr_t dst;
	dma_addr_t next;
	u32 cctl;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	spinlock_t lock;
};

/*
 * PL08X specific defines
 */

/*
 * Memory boundaries: the manual for PL08x says that the controller
 * cannot read past a 1KiB boundary, so these defines are used to
 * create transfer LLIs that do not cross such boundaries.
 */
#define PL08X_BOUNDARY_SHIFT	(10)	/* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE	(1 << PL08X_BOUNDARY_SHIFT)

/* Minimum period between work queue runs */
#define PL08X_WQ_PERIODMIN	20

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define PL08X_MAX_ALLOCS	0x40
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
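/*
 * Worked example of the define above: on a 32-bit build where struct lli
 * is 16 bytes (three dma_addr_t plus one u32 - an assumption about the
 * target ABI), 0x2000 / 16 gives room for 512 LLIs per transfer.
 */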
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed
 */
static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
			    struct pl08x_phy_chan *ch)
{
	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(ch))
		;

	dev_vdbg(&pl08x->adev->dev,
		 "WRITE channel %d: csrc=%08x, cdst=%08x, "
		 "cctl=%08x, clli=%08x, ccfg=%08x\n",
		 ch->id,
		 ch->csrc,
		 ch->cdst,
		 ch->cctl,
		 ch->clli,
		 ch->ccfg);

	writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
	writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
	writel(ch->clli, ch->base + PL080_CH_LLI);
	writel(ch->cctl, ch->base + PL080_CH_CONTROL);
	writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
}

static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_channel_data *cd = plchan->cd;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_txd *txd = plchan->at;

	/* Copy the basic control register calculated at transfer config */
	phychan->csrc = txd->csrc;
	phychan->cdst = txd->cdst;
	phychan->clli = txd->clli;
	phychan->cctl = txd->cctl;

	/* Assign the signal to the proper control registers */
	phychan->ccfg = cd->ccfg;
	phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
	phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
	/* If it wasn't set from AMBA, ignore it */
	if (txd->direction == DMA_TO_DEVICE)
		/* Select signal as destination */
		phychan->ccfg |=
			(phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
	else if (txd->direction == DMA_FROM_DEVICE)
		/* Select signal as source */
		phychan->ccfg |=
			(phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
	/* Always enable error interrupts */
	phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
	/* Always enable terminal interrupts */
	phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
}

/*
 * Enable the DMA channel
 * Assumes all other configuration bits have been set
 * as desired before this code is called
 */
static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
				  struct pl08x_phy_chan *ch)
{
	u32 val;

	/*
	 * Do not access config register until channel shows as disabled
	 */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
		;

	/*
	 * Do not access config register until channel shows as inactive
	 */
	val = readl(ch->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(ch->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
}

/*
 * Overall DMAC remains enabled always.
 *
 * Disabling individual channels could lose data.
 *
 * Disable the peripheral DMA after disabling the DMAC
 * in order to allow the DMAC FIFO to drain, and
 * hence allow the channel to show inactive
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(ch))
		;
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/* Stops the channel */
static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	pl08x_pause_phy_chan(ch);

	/* Disable channel */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_ENABLE;
	val &= ~PL080_CONFIG_ERR_IRQ_MASK;
	val &= ~PL080_CONFIG_TC_IRQ_MASK;
	writel(val, ch->base + PL080_CH_CONFIG);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Mask out everything but the source width bits before decoding */
	switch ((cctl & PL080_CONTROL_SWIDTH_MASK) >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
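/*
 * Worked example of the decoding above: a cctl encoding a 32-bit source
 * width and a transfer size field of 0x10 yields 0x10 * 4 == 64 bytes.
 */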

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *txd;
	unsigned long flags;
	u32 bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);

	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Next follow the LLIs to get the number of pending bytes in the
	 * currently active transaction.
	 */
	if (ch && txd) {
		struct lli *llis_va = txd->llis_va;
		struct lli *llis_bus = (struct lli *) txd->llis_bus;
		u32 clli = readl(ch->base + PL080_CH_LLI);

		/* First get the bytes in the current active LLI */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			int i = 0;

			/* Forward to the LLI pointed to by clli */
			while ((clli != (u32) &(llis_bus[i])) &&
			       (i < MAX_NUM_TSFR_LLIS))
				i++;

			while (clli) {
				bytes += get_bytes_in_cctl(llis_va[i].cctl);
				/*
				 * An LLI pointer of 0 terminates the LLI list
				 */
				clli = llis_va[i].next;
				i++;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->desc_list)) {
		list_for_each_entry(txdi, &plchan->desc_list, node) {
			bytes += txdi->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	/*
	 * Try to locate a physical channel to be used for
	 * this transfer. If all are taken return NULL and
	 * the requester will have to cope by using some fallback
	 * PIO mode or retrying later.
	 */
	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	/* Stop the channel and clear its interrupts */
	pl08x_stop_phy_chan(ch);
	writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
	writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);

	/* Mark it as free */
	spin_lock_irqsave(&ch->lock, flags);
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  u32 tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
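/*
 * Illustration of the helper above: pl08x_cctl_bits(cctl, 4, 2, 8) clears
 * the old width and size fields, then encodes a 32-bit source width, a
 * 16-bit destination width and a transfer size of 8 elements.
 */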

/*
 * Autoselect a master bus to use for the transfer:
 * - prefers the destination bus if both are available
 * - if one bus carries a fixed (non-incrementing) address,
 *   the other bus is chosen as master
 */
void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
	struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
	struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = src_bus;
		*sbus = dst_bus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = dst_bus;
		*sbus = src_bus;
	} else {
		if (dst_bus->buswidth == 4) {
			*mbus = dst_bus;
			*sbus = src_bus;
		} else if (src_bus->buswidth == 4) {
			*mbus = src_bus;
			*sbus = dst_bus;
		} else if (dst_bus->buswidth == 2) {
			*mbus = dst_bus;
			*sbus = src_bus;
		} else if (src_bus->buswidth == 2) {
			*mbus = src_bus;
			*sbus = dst_bus;
		} else {
			/* src_bus->buswidth == 1 */
			*mbus = dst_bus;
			*sbus = src_bus;
		}
	}
}
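/*
 * Example of the selection above: in a memory-to-peripheral transfer the
 * destination address is fixed (no DST_INCR), so the incrementing source
 * (memory) bus becomes the master bus; for a memcpy between two 32-bit
 * buses the destination bus is preferred as master.
 */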

/*
 * Fills in one LLI for a certain transfer descriptor
 * and advances the counter
 */
int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
	struct pl08x_txd *txd, int num_llis, int len,
	u32 cctl, u32 *remainder)
{
	struct lli *llis_va = txd->llis_va;
	struct lli *llis_bus = (struct lli *) txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = txd->srcbus.addr;
	llis_va[num_llis].dst = txd->dstbus.addr;

	/*
	 * On versions with dual masters, you can optionally OR in
	 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
	 * in new LLIs with that master, but we always try to
	 * choose AHB1 to point into memory. The idea is to have AHB2
	 * fixed on the peripheral and AHB1 messing around in the
	 * memory. So we don't manipulate this bit currently.
	 */

	llis_va[num_llis].next =
		(dma_addr_t)((u32) &(llis_bus[num_llis + 1]));

	if (cctl & PL080_CONTROL_SRC_INCR)
		txd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		txd->dstbus.addr += len;

	*remainder -= len;

	return num_llis + 1;
}

/*
 * Return number of bytes to fill to boundary, or len
 */
static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
{
	u32 boundary;

	boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
		<< PL08X_BOUNDARY_SHIFT;

	if (boundary < addr + len)
		return boundary - addr;
	else
		return len;
}
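/*
 * Worked example of the boundary helper: for addr == 0x3fc and len == 16
 * the next 1KiB boundary is 0x400, so pl08x_pre_boundary() returns 4 and
 * the remaining 12 bytes must go into the following LLI.
 */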

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_txd *txd)
{
	struct pl08x_channel_data *cd;
	struct pl08x_bus_data *mbus, *sbus;
	u32 remainder;
	int num_llis = 0;
	u32 cctl;
	int max_bytes_per_lli;
	int total_bytes = 0;
	struct lli *llis_va;
	struct lli *llis_bus;

	/* Check the descriptor before dereferencing it for ->cd */
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
		return 0;
	}
	cd = txd->cd;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
				      &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	/*
	 * Initialize bus values for this transfer
	 * from the passed optimal values
	 */
	if (!cd) {
		dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
		return 0;
	}

	/* Get the default CCTL from the platform data */
	cctl = cd->cctl;

	/*
	 * On the PL080 we have two bus masters and we
	 * should select one for source and one for
	 * destination. We try to use AHB2 for the
	 * bus which does not increment (typically the
	 * peripheral) else we just choose something.
	 */
	cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
	if (pl08x->vd->dualmaster) {
		if (cctl & PL080_CONTROL_SRC_INCR)
			/* Source increments, use AHB2 for destination */
			cctl |= PL080_CONTROL_DST_AHB2;
		else if (cctl & PL080_CONTROL_DST_INCR)
			/* Destination increments, use AHB2 for source */
			cctl |= PL080_CONTROL_SRC_AHB2;
		else
			/* Just pick something, source AHB1 dest AHB2 */
			cctl |= PL080_CONTROL_DST_AHB2;
	}

	/* Find maximum width of the source bus */
	txd->srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	txd->dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	/* Set up the bus widths to the maximum */
	txd->srcbus.buswidth = txd->srcbus.maxwidth;
	txd->dstbus.buswidth = txd->dstbus.maxwidth;
	dev_vdbg(&pl08x->adev->dev,
		 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
		 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);

	/*
	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
	 */
	max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
		PL080_CONTROL_TRANSFER_SIZE_MASK;
	dev_vdbg(&pl08x->adev->dev,
		 "%s max bytes per lli = %d\n",
		 __func__, max_bytes_per_lli);

	/* We need to count this down to zero */
	remainder = txd->len;
	dev_vdbg(&pl08x->adev->dev,
		 "%s remainder = %d\n",
		 __func__, remainder);

	/*
	 * Choose bus to align to
	 * - prefers destination bus if both available
	 * - if fixed address on one bus chooses other
	 * - modifies cctl to choose an appropriate master
	 */
	pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
				&mbus, &sbus, cctl);

	/*
	 * The lowest bit of the LLI register
	 * is also used to indicate which master to
	 * use for reading the LLIs.
	 */

	if (txd->len < mbus->buswidth) {
		/*
		 * Less than a bus width available
		 * - send as single bytes
		 */
		while (remainder) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s single byte LLIs for a transfer of "
				 "less than a bus width (remain %08x)\n",
				 __func__, remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			num_llis =
				pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
					cctl, &remainder);
			total_bytes++;
		}
	} else {
		/*
		 * Make one byte LLIs until master bus is aligned
		 * - slave will then be aligned also
		 */
		while ((mbus->addr) % (mbus->buswidth)) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s adjustment lli for less than bus width "
				 "(remain %08x)\n",
				 __func__, remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			num_llis = pl08x_fill_lli_for_desc
				(pl08x, txd, num_llis, 1, cctl, &remainder);
			total_bytes++;
		}

		/*
		 * Master now aligned
		 * - if slave is not then we must set its width down
		 */
		if (sbus->addr % sbus->buswidth) {
			dev_dbg(&pl08x->adev->dev,
				"%s set down bus width to one byte\n",
				__func__);

			sbus->buswidth = 1;
		}

		/*
		 * Make largest possible LLIs until less than one bus
		 * width left
		 */
		while (remainder > (mbus->buswidth - 1)) {
			int lli_len, target_len;
			int tsize;
			int odd_bytes;

			/*
			 * If enough left try to send max possible,
			 * otherwise try to send the remainder
			 */
			target_len = remainder;
			if (remainder > max_bytes_per_lli)
				target_len = max_bytes_per_lli;

			/*
			 * Set bus lengths for incrementing buses
			 * to number of bytes which fill to next memory
			 * boundary
			 */
			if (cctl & PL080_CONTROL_SRC_INCR)
				txd->srcbus.fill_bytes =
					pl08x_pre_boundary(
						txd->srcbus.addr,
						remainder);
			else
				txd->srcbus.fill_bytes =
					max_bytes_per_lli;

			if (cctl & PL080_CONTROL_DST_INCR)
				txd->dstbus.fill_bytes =
					pl08x_pre_boundary(
						txd->dstbus.addr,
						remainder);
			else
				txd->dstbus.fill_bytes =
					max_bytes_per_lli;

			/*
			 * The shorter of the two fill lengths limits
			 * this LLI
			 */
			lli_len = min(txd->srcbus.fill_bytes,
				      txd->dstbus.fill_bytes);

			BUG_ON(lli_len > remainder);

			if (lli_len <= 0) {
				dev_err(&pl08x->adev->dev,
					"%s lli_len is %d, <= 0\n",
					__func__, lli_len);
				return 0;
			}

			if (lli_len == target_len) {
				/*
				 * Can send what we wanted
				 */
				/*
				 * Maintain alignment
				 */
				lli_len = (lli_len/mbus->buswidth) *
					mbus->buswidth;
				odd_bytes = 0;
			} else {
				/*
				 * So now we know how many bytes to transfer
				 * to get to the nearest boundary
				 * The next LLI will go past the boundary
				 * - however we may be working to a boundary
				 *   on the slave bus
				 * We need to ensure the master stays aligned
				 */
				odd_bytes = lli_len % mbus->buswidth;
				/*
				 * - and that we are working in multiples
				 *   of the bus widths
				 */
				lli_len -= odd_bytes;

			}

			if (lli_len) {
				/*
				 * Check against minimum bus alignment:
				 * Calculate actual transfer size in relation
				 * to bus width and get a maximum remainder of
				 * the smallest bus width - 1
				 */
				/* FIXME: use round_down()? */
				tsize = lli_len / min(mbus->buswidth,
						      sbus->buswidth);
				lli_len = tsize * min(mbus->buswidth,
						      sbus->buswidth);

				if (target_len != lli_len) {
					dev_vdbg(&pl08x->adev->dev,
						"%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
						__func__, target_len, lli_len, txd->len);
				}

				cctl = pl08x_cctl_bits(cctl,
						       txd->srcbus.buswidth,
						       txd->dstbus.buswidth,
						       tsize);

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
					__func__, lli_len, remainder);
				num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
						num_llis, lli_len, cctl,
						&remainder);
				total_bytes += lli_len;
			}

			if (odd_bytes) {
				/*
				 * Creep past the boundary,
				 * maintaining master alignment
				 */
				int j;
				for (j = 0; (j < mbus->buswidth)
						&& (remainder); j++) {
					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
					dev_vdbg(&pl08x->adev->dev,
						"%s align with boundary, single byte (remain %08x)\n",
						__func__, remainder);
					num_llis =
						pl08x_fill_lli_for_desc(pl08x,
							txd, num_llis, 1,
							cctl, &remainder);
					total_bytes++;
				}
			}
		}

		/*
		 * Send any odd bytes
		 */
		if (remainder < 0) {
			dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
				__func__, remainder);
			return 0;
		}

		while (remainder) {
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			dev_vdbg(&pl08x->adev->dev,
				"%s align with boundary, single odd byte (remain %d)\n",
				__func__, remainder);
			num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
					1, cctl, &remainder);
			total_bytes++;
		}
	}

	if (total_bytes != txd->len) {
		dev_err(&pl08x->adev->dev,
			"%s size of encoded LLIs don't match total txd, transferred 0x%08x from size 0x%08x\n",
			__func__, total_bytes, txd->len);
		return 0;
	}

	if (num_llis >= MAX_NUM_TSFR_LLIS) {
		dev_err(&pl08x->adev->dev,
			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
			__func__, (u32) MAX_NUM_TSFR_LLIS);
		return 0;
	}
	/*
	 * Decide whether this is a loop or a terminated transfer
	 */
	llis_va = txd->llis_va;
	llis_bus = (struct lli *) txd->llis_bus;

	if (cd->circular_buffer) {
		/*
		 * Loop the circular buffer so that the next element
		 * points back to the beginning of the LLI.
		 */
		llis_va[num_llis - 1].next =
			(dma_addr_t)((unsigned int)&(llis_bus[0]));
	} else {
		/*
		 * On non-circular buffers, the final LLI terminates
		 * the list.
		 */
		llis_va[num_llis - 1].next = 0;
		/*
		 * The final LLI element shall also fire an interrupt
		 */
		llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
	}

	/* Now store the channel register values */
	txd->csrc = llis_va[0].src;
	txd->cdst = llis_va[0].dst;
	if (num_llis > 1)
		txd->clli = llis_va[0].next;
	else
		txd->clli = 0;

	txd->cctl = llis_va[0].cctl;
	/* ccfg will be set at physical channel allocation time */

#ifdef VERBOSE_DEBUG
	{
		int i;

		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
				 i,
				 &llis_va[i],
				 llis_va[i].src,
				 llis_va[i].dst,
				 llis_va[i].cctl,
				 llis_va[i].next
				);
		}
	}
#endif

	return num_llis;
}

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no descriptor to free\n",
			__func__);
		/* Nothing to free - do not dereference a NULL descriptor */
		return;
	}

	/* Free the LLI */
	dma_pool_free(pl08x->pool, txd->llis_va,
		      txd->llis_bus);

	pl08x->pool_ctr--;

	kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->desc_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->desc_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan)
		return 0;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave &&
	    ch->signal < 0 &&
	    pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		ch->id,
		ch->signal,
		plchan->name);

	plchan->phychan = ch;

	return 0;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);

	atomic_inc(&plchan->last_issued);
	tx->cookie = atomic_read(&plchan->last_issued);
	/* This unlock follows the lock in the prep() function */
	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);

	return tx->cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop
 * may give problems - could schedule where indicated.
 * If slaves are relying on interrupts to signal completion this
 * function must not be called with interrupts disabled
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
		    dma_cookie_t cookie,
		    struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;
	u32 bytesleft = 0;

	last_used = atomic_read(&plchan->last_issued);
	last_complete = plchan->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(txstate, last_complete, last_used, 0);
		return ret;
	}

	/*
	 * schedule(); could be inserted here
	 */

	/*
	 * This cookie is not complete yet
	 */
	last_used = atomic_read(&plchan->last_issued);
	last_complete = plchan->lc;

	/* Get number of bytes left in the active transactions and queue */
	bytesleft = pl08x_getbytes_chan(plchan);

	dma_set_tx_state(txstate, last_complete, last_used,
			 bytesleft);

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
struct burst_table {
	int burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 128,
		.reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 64,
		.reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 32,
		.reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 16,
		.reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 8,
		.reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 4,
		.reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 1,
		.reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
};

static void dma_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_channel_data *cd = plchan->cd;
	enum dma_slave_buswidth addr_width;
	u32 maxburst;
	u32 cctl = 0;
	/* Mask out all except src and dst channel */
	u32 ccfg = cd->ccfg & 0x000003DEU;
	int i;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_TO_DEVICE) {
		plchan->runtime_addr = config->dst_addr;
		cctl |= PL080_CONTROL_SRC_INCR;
		ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_FROM_DEVICE) {
		plchan->runtime_addr = config->src_addr;
		cctl |= PL080_CONTROL_DST_INCR;
		ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return;
	}

	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	default:
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return;
	}

	/*
	 * Now decide on a maxburst:
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single || maxburst == 0) {
		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
	} else {
		/* Pick the largest burst that does not exceed maxburst */
		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
			if (burst_sizes[i].burstwords <= maxburst)
				break;
		cctl |= burst_sizes[i].reg;
	}

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	cctl &= ~PL080_CONTROL_PROT_MASK;
	cctl |= PL080_CONTROL_PROT_SYS;

	/* Modify the default channel data to fit PrimeCell request */
	cd->cctl = cctl;
	cd->ccfg = ccfg;

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		addr_width,
		maxburst,
		cctl, ccfg);
}
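/*
 * Sketch of how a client reaches the function above through the dmaengine
 * control hook (the peripheral FIFO address is hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_TO_DEVICE,
 *		.dst_addr = 0x80120000,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &cfg);
 */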

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active */
	if (plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Didn't get a physical channel so waiting for it ... */
	if (plchan->state == PL08X_CHAN_WAITING) {
		/* Do not return with the spinlock still held */
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->desc_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->desc_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->at = next;
		plchan->state = PL08X_CHAN_RUNNING;

		/* Configure the physical channel for the active txd */
		pl08x_config_phychan_for_txd(plchan);
		pl08x_set_cregs(pl08x, plchan->phychan);
		pl08x_enable_phy_chan(pl08x, plchan->phychan);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	int num_llis;
	struct pl08x_driver_data *pl08x = plchan->host;
	int ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);

	if (!num_llis)
		return -EINVAL;

	spin_lock_irqsave(&plchan->lock, plchan->lockflags);

	/*
	 * If this device is not using a circular buffer then
	 * queue this new descriptor for transfer.
	 * The descriptor for a circular buffer continues
	 * to be used until the channel is freed.
	 */
	if (txd->cd->circular_buffer)
		dev_err(&pl08x->adev->dev,
			"%s attempting to queue a circular buffer\n",
			__func__);
	else
		list_add_tail(&txd->node,
			      &plchan->desc_list);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel available: we will
		 * stack up the memcpy channels until there is a channel
		 * available to handle it, whereas slave transfers may
		 * have been denied due to platform channel muxing
		 * restrictions. Since there is no guarantee that this will
		 * ever be resolved, and since the signal must be acquired
		 * AFTER acquiring the physical channel, we will let them
		 * be NACKed with -EBUSY here. The drivers can always retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
			return -EBUSY;
		}
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else
		/*
		 * Else we're all set, paused and ready to roll,
		 * status will switch to PL08X_CHAN_RUNNING when
		 * we call issue_pending(). If there is something
		 * running on the channel already we don't change
		 * its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	/*
	 * Notice that we leave plchan->lock locked on purpose:
	 * it will be unlocked in the subsequent tx_submit()
	 * call. This is a consequence of the current API.
	 */

	return 0;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dma_async_tx_descriptor_init(&txd->tx, chan);
	txd->direction = DMA_NONE;
	txd->srcbus.addr = src;
	txd->dstbus.addr = dest;

	/* Set platform data for m2m */
	txd->cd = &pl08x->pd->memcpy_channel;
	/* Both to be incremented or the code will break */
	txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
	txd->tx.tx_submit = pl08x_tx_submit;
	txd->tx.callback = NULL;
	txd->tx.callback_param = NULL;
	txd->len = len;

	INIT_LIST_HEAD(&txd->node);
	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;
	/*
	 * NB: the channel lock is held at this point so tx_submit()
	 * must be called in direct succession.
	 */

	return &txd->tx;
}

struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	/*
	 * Current implementation ASSUMES only one sg
	 */
	if (sg_len != 1) {
		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
			__func__);
		BUG();
	}

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
		__func__, sgl->length, plchan->name);

	txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	dma_async_tx_descriptor_init(&txd->tx, chan);

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;
	if (direction == DMA_TO_DEVICE) {
		txd->srcbus.addr = sgl->dma_address;
		if (plchan->runtime_addr)
			txd->dstbus.addr = plchan->runtime_addr;
		else
			txd->dstbus.addr = plchan->cd->addr;
	} else if (direction == DMA_FROM_DEVICE) {
		if (plchan->runtime_addr)
			txd->srcbus.addr = plchan->runtime_addr;
		else
			txd->srcbus.addr = plchan->cd->addr;
		txd->dstbus.addr = sgl->dma_address;
	} else {
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}
	txd->cd = plchan->cd;
	txd->tx.tx_submit = pl08x_tx_submit;
	txd->tx.callback = NULL;
	txd->tx.callback_param = NULL;
	txd->len = sgl->length;
	INIT_LIST_HEAD(&txd->node);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;
	/*
	 * NB: the channel lock is held at this point so tx_submit()
	 * must be called in direct succession.
	 */

	return &txd->tx;
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		dma_set_runtime_config(chan,
				       (struct dma_slave_config *)
				       arg);
		return 0;
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_stop_phy_chan(plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			if ((plchan->phychan->signal >= 0) &&
			    pl08x->pd->put_signal) {
				pl08x->pd->put_signal(plchan);
				plchan->phychan->signal = -1;
			}
			pl08x_put_phy_channel(pl08x, plchan->phychan);
			plchan->phychan = NULL;
		}
		/* Stop any pending tasklet */
		tasklet_disable(&plchan->tasklet);
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	char *name = chan_id;

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
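/*
 * Usage sketch for the filter above; the client-side code and the channel
 * name "uart0_tx" are hypothetical and must match the platform data:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 */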

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of
 * physical channels actually used, if it is zero... well
 * shut it off. That will save some power. Cut the clock
 * at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	u32 val;

	val = readl(pl08x->base + PL080_CONFIG);
	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
	/* We implicitly clear bit 1 and that means little-endian mode */
	val |= PL080_CONFIG_ENABLE;
	writel(val, pl08x->base + PL080_CONFIG);
}
1623 | ||
1624 | static void pl08x_tasklet(unsigned long data) | |
1625 | { | |
1626 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; | |
1627 | struct pl08x_phy_chan *phychan = plchan->phychan; | |
1628 | struct pl08x_driver_data *pl08x = plchan->host; | |
1629 | ||
1630 | if (!plchan) | |
1631 | BUG(); | |
1632 | ||
1633 | spin_lock(&plchan->lock); | |
1634 | ||
1635 | if (plchan->at) { | |
1636 | dma_async_tx_callback callback = | |
1637 | plchan->at->tx.callback; | |
1638 | void *callback_param = | |
1639 | plchan->at->tx.callback_param; | |
1640 | ||
1641 | /* Update the last completed cookie */ | |
1642 | plchan->lc = plchan->at->tx.cookie; | |
1646 | ||
1647 | /* | |
1648 | * Callback to signal completion | |
1649 | */ | |
1650 | if (callback) | |
1651 | callback(callback_param); | |
1652 | ||
1653 | /* | |
1654 | * Device callbacks should NOT clear the current | |
1655 | * transaction on the channel (though arguably some | |
1656 | * clients might legitimately want that) | |
1657 | */ | |
1658 | BUG_ON(!plchan->at); | |
1660 | ||
1661 | /* | |
1662 | * Free the descriptor if it's not for a device | |
1663 | * using a circular buffer | |
1664 | */ | |
1665 | if (!plchan->at->cd->circular_buffer) { | |
1666 | pl08x_free_txd(pl08x, plchan->at); | |
1667 | plchan->at = NULL; | |
1668 | } | |
1669 | /* | |
1670 | * else: descriptors for circular buffers are only | |
1671 | * freed once the client has disabled DMA | |
1672 | */ | |
1674 | } | |
1675 | /* | |
1676 | * If a new descriptor is queued, set it up | |
1677 | * plchan->at is NULL here | |
1678 | */ | |
1679 | if (!list_empty(&plchan->desc_list)) { | |
1680 | struct pl08x_txd *next; | |
1681 | ||
1682 | next = list_first_entry(&plchan->desc_list, | |
1683 | struct pl08x_txd, | |
1684 | node); | |
1685 | list_del(&next->node); | |
1686 | plchan->at = next; | |
1687 | /* Configure the physical channel for the next txd */ | |
1688 | pl08x_config_phychan_for_txd(plchan); | |
1689 | pl08x_set_cregs(pl08x, plchan->phychan); | |
1690 | pl08x_enable_phy_chan(pl08x, plchan->phychan); | |
1691 | } else { | |
1692 | struct pl08x_dma_chan *waiting = NULL; | |
1693 | ||
1694 | /* | |
1695 | * No more jobs, so free up the physical channel | |
1696 | * Free any allocated signal on slave transfers too | |
1697 | */ | |
1698 | if ((phychan->signal >= 0) && pl08x->pd->put_signal) { | |
1699 | pl08x->pd->put_signal(plchan); | |
1700 | phychan->signal = -1; | |
1701 | } | |
1702 | pl08x_put_phy_channel(pl08x, phychan); | |
1703 | plchan->phychan = NULL; | |
1704 | plchan->state = PL08X_CHAN_IDLE; | |
1705 | ||
1706 | /* | |
1707 | * Before anyone else can grab the freed-up physical | |
1708 | * channel, check whether some memcpy transfer is waiting | |
1709 | * to start: it may have been queued up while all the | |
1710 | * physical channels were busy. | |
1711 | */ | |
1713 | list_for_each_entry(waiting, &pl08x->memcpy.channels, | |
1714 | chan.device_node) { | |
1715 | if (waiting->state == PL08X_CHAN_WAITING && | |
1716 | waiting->waiting != NULL) { | |
1717 | int ret; | |
1718 | ||
1719 | /* This should REALLY not fail now */ | |
1720 | ret = prep_phy_channel(waiting, | |
1721 | waiting->waiting); | |
1722 | BUG_ON(ret); | |
1723 | waiting->state = PL08X_CHAN_RUNNING; | |
1724 | waiting->waiting = NULL; | |
1725 | pl08x_issue_pending(&waiting->chan); | |
1726 | break; | |
1727 | } | |
1728 | } | |
1729 | } | |
1730 | ||
1731 | spin_unlock(&plchan->lock); | |
1732 | } | |
1733 | ||
1734 | static irqreturn_t pl08x_irq(int irq, void *dev) | |
1735 | { | |
1736 | struct pl08x_driver_data *pl08x = dev; | |
1737 | u32 mask = 0; | |
1738 | u32 val; | |
1739 | int i; | |
1740 | ||
1741 | val = readl(pl08x->base + PL080_ERR_STATUS); | |
1742 | if (val) { | |
1743 | /* | |
1744 | * An error interrupt (on one or more channels) | |
1745 | */ | |
1746 | dev_err(&pl08x->adev->dev, | |
1747 | "%s error interrupt, register value 0x%08x\n", | |
1748 | __func__, val); | |
1749 | /* | |
1750 | * Simply clear ALL PL08X error interrupts, | |
1751 | * regardless of channel and cause | |
1752 | * FIXME: should be 0x00000003 on PL081 really. | |
1753 | */ | |
1754 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | |
1755 | } | |
1756 | val = readl(pl08x->base + PL080_INT_STATUS); | |
1757 | for (i = 0; i < pl08x->vd->channels; i++) { | |
1758 | if ((1 << i) & val) { | |
1759 | /* Locate physical channel */ | |
1760 | struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; | |
1761 | struct pl08x_dma_chan *plchan = phychan->serving; | |
1762 | ||
1763 | /* Schedule tasklet on this channel */ | |
1764 | tasklet_schedule(&plchan->tasklet); | |
1765 | ||
1766 | mask |= (1 << i); | |
1767 | } | |
1768 | } | |
1769 | /* | |
1770 | * Clear only the terminal interrupts on channels we processed | |
1771 | */ | |
1772 | writel(mask, pl08x->base + PL080_TC_CLEAR); | |
1773 | ||
1774 | return mask ? IRQ_HANDLED : IRQ_NONE; | |
1775 | } | |
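/*
 * Returning IRQ_NONE when no channel bit was set lets the core's
 * spurious-interrupt detection kick in; only the terminal-count
 * interrupts of the channels we actually serviced are cleared above.
 */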
1776 | ||
1777 | /* | |
1778 | * Initialise the DMAC memcpy/slave channels. | |
1779 | * Make a local wrapper to hold required data | |
1780 | */ | |
1781 | static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |
1782 | struct dma_device *dmadev, | |
1783 | unsigned int channels, | |
1784 | bool slave) | |
1785 | { | |
1786 | struct pl08x_dma_chan *chan; | |
1787 | int i; | |
1788 | ||
1789 | INIT_LIST_HEAD(&dmadev->channels); | |
1790 | /* | |
1791 | * Register as many memcpy channels as we have physical | |
1792 | * channels; we won't always be able to use them all, but | |
1793 | * the code has to cope with that situation. | |
1794 | */ | |
1795 | for (i = 0; i < channels; i++) { | |
1796 | chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); | |
1797 | if (!chan) { | |
1798 | dev_err(&pl08x->adev->dev, | |
1799 | "%s no memory for channel\n", __func__); | |
1800 | return -ENOMEM; | |
1801 | } | |
1802 | ||
1803 | chan->host = pl08x; | |
1804 | chan->state = PL08X_CHAN_IDLE; | |
1805 | ||
1806 | if (slave) { | |
1807 | chan->slave = true; | |
1808 | chan->name = pl08x->pd->slave_channels[i].bus_id; | |
1809 | chan->cd = &pl08x->pd->slave_channels[i]; | |
1810 | } else { | |
1811 | chan->cd = &pl08x->pd->memcpy_channel; | |
1812 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); | |
1813 | if (!chan->name) { | |
1814 | kfree(chan); | |
1815 | return -ENOMEM; | |
1816 | } | |
1817 | } | |
1818 | dev_info(&pl08x->adev->dev, | |
1819 | "initialize virtual channel \"%s\"\n", | |
1820 | chan->name); | |
1821 | ||
1822 | chan->chan.device = dmadev; | |
1823 | atomic_set(&chan->last_issued, 0); | |
1824 | chan->lc = atomic_read(&chan->last_issued); | |
1825 | ||
1826 | spin_lock_init(&chan->lock); | |
1827 | INIT_LIST_HEAD(&chan->desc_list); | |
1828 | tasklet_init(&chan->tasklet, pl08x_tasklet, | |
1829 | (unsigned long) chan); | |
1830 | ||
1831 | list_add_tail(&chan->chan.device_node, &dmadev->channels); | |
1832 | } | |
1833 | dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", | |
1834 | i, slave ? "slave" : "memcpy"); | |
1835 | return i; | |
1836 | } | |
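/*
 * Note on the return convention: the function above returns the
 * number of channels it registered; the call sites in probe() treat
 * <= 0 as failure, so at least one channel must come up.
 */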
1837 | ||
1838 | static void pl08x_free_virtual_channels(struct dma_device *dmadev) | |
1839 | { | |
1840 | struct pl08x_dma_chan *chan = NULL; | |
1841 | struct pl08x_dma_chan *next; | |
1842 | ||
1843 | list_for_each_entry_safe(chan, | |
1844 | next, &dmadev->channels, chan.device_node) { | |
1845 | list_del(&chan->chan.device_node); | |
1846 | kfree(chan); | |
1847 | } | |
1848 | } | |
1849 | ||
1850 | #ifdef CONFIG_DEBUG_FS | |
1851 | static const char *pl08x_state_str(enum pl08x_dma_chan_state state) | |
1852 | { | |
1853 | switch (state) { | |
1854 | case PL08X_CHAN_IDLE: | |
1855 | return "idle"; | |
1856 | case PL08X_CHAN_RUNNING: | |
1857 | return "running"; | |
1858 | case PL08X_CHAN_PAUSED: | |
1859 | return "paused"; | |
1860 | case PL08X_CHAN_WAITING: | |
1861 | return "waiting"; | |
1862 | default: | |
1863 | break; | |
1864 | } | |
1865 | return "UNKNOWN STATE"; | |
1866 | } | |
1867 | ||
1868 | static int pl08x_debugfs_show(struct seq_file *s, void *data) | |
1869 | { | |
1870 | struct pl08x_driver_data *pl08x = s->private; | |
1871 | struct pl08x_dma_chan *chan; | |
1872 | struct pl08x_phy_chan *ch; | |
1873 | unsigned long flags; | |
1874 | int i; | |
1875 | ||
1876 | seq_printf(s, "PL08x physical channels:\n"); | |
1877 | seq_printf(s, "CHANNEL:\tUSER:\n"); | |
1878 | seq_printf(s, "--------\t-----\n"); | |
1879 | for (i = 0; i < pl08x->vd->channels; i++) { | |
1880 | struct pl08x_dma_chan *virt_chan; | |
1881 | ||
1882 | ch = &pl08x->phy_chans[i]; | |
1883 | ||
1884 | spin_lock_irqsave(&ch->lock, flags); | |
1885 | virt_chan = ch->serving; | |
1886 | ||
1887 | seq_printf(s, "%d\t\t%s\n", | |
1888 | ch->id, virt_chan ? virt_chan->name : "(none)"); | |
1889 | ||
1890 | spin_unlock_irqrestore(&ch->lock, flags); | |
1891 | } | |
1892 | ||
1893 | seq_printf(s, "\nPL08x virtual memcpy channels:\n"); | |
1894 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | |
1895 | seq_printf(s, "--------\t------\n"); | |
1896 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { | |
1897 | seq_printf(s, "%s\t\t%s\n", chan->name, | |
1898 | pl08x_state_str(chan->state)); | |
1899 | } | |
1900 | ||
1901 | seq_printf(s, "\nPL08x virtual slave channels:\n"); | |
1902 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | |
1903 | seq_printf(s, "--------\t------\n"); | |
1904 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { | |
1905 | seq_printf(s, "%s\t\t%s\n", chan->name, | |
1906 | pl08x_state_str(chan->state)); | |
1907 | } | |
1908 | ||
1909 | return 0; | |
1910 | } | |
1911 | ||
1912 | static int pl08x_debugfs_open(struct inode *inode, struct file *file) | |
1913 | { | |
1914 | return single_open(file, pl08x_debugfs_show, inode->i_private); | |
1915 | } | |
1916 | ||
1917 | static const struct file_operations pl08x_debugfs_operations = { | |
1918 | .open = pl08x_debugfs_open, | |
1919 | .read = seq_read, | |
1920 | .llseek = seq_lseek, | |
1921 | .release = single_release, | |
1922 | }; | |
1923 | ||
1924 | static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | |
1925 | { | |
1926 | /* Expose a simple debugfs interface to view channel state */ | |
1927 | (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, | |
1928 | NULL, pl08x, | |
1929 | &pl08x_debugfs_operations); | |
1930 | } | |
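/*
 * Hedged usage note: the file is created in the debugfs root and
 * named after the AMBA device, so channel state can be inspected
 * with e.g. (the device name here is illustrative):
 *
 *	cat /sys/kernel/debug/dmac0
 */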
1931 | ||
1932 | #else | |
1933 | static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | |
1934 | { | |
1935 | } | |
1936 | #endif | |
1937 | ||
1938 | static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |
1939 | { | |
1940 | struct pl08x_driver_data *pl08x; | |
1941 | struct vendor_data *vd = id->data; | |
1942 | int ret = 0; | |
1943 | int i; | |
1944 | ||
1945 | ret = amba_request_regions(adev, NULL); | |
1946 | if (ret) | |
1947 | return ret; | |
1948 | ||
1949 | /* Create the driver state holder */ | |
1950 | pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); | |
1951 | if (!pl08x) { | |
1952 | ret = -ENOMEM; | |
1953 | goto out_no_pl08x; | |
1954 | } | |
1955 | ||
1956 | /* Initialize memcpy engine */ | |
1957 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); | |
1958 | pl08x->memcpy.dev = &adev->dev; | |
1959 | pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources; | |
1960 | pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; | |
1961 | pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; | |
1962 | pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; | |
1963 | pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; | |
1964 | pl08x->memcpy.device_issue_pending = pl08x_issue_pending; | |
1965 | pl08x->memcpy.device_control = pl08x_control; | |
1966 | ||
1967 | /* Initialize slave engine */ | |
1968 | dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); | |
1969 | pl08x->slave.dev = &adev->dev; | |
1970 | pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; | |
1971 | pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; | |
1972 | pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; | |
1973 | pl08x->slave.device_tx_status = pl08x_dma_tx_status; | |
1974 | pl08x->slave.device_issue_pending = pl08x_issue_pending; | |
1975 | pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; | |
1976 | pl08x->slave.device_control = pl08x_control; | |
1977 | ||
1978 | /* Get the platform data */ | |
1979 | pl08x->pd = dev_get_platdata(&adev->dev); | |
1980 | if (!pl08x->pd) { | |
1981 | dev_err(&adev->dev, "no platform data supplied\n"); | |
1982 | ret = -EINVAL; /* otherwise we would return 0 despite failing */ | |
1983 | goto out_no_platdata; | |
1983 | } | |
1984 | ||
1985 | /* Assign useful pointers to the driver state */ | |
1986 | pl08x->adev = adev; | |
1987 | pl08x->vd = vd; | |
1988 | ||
1989 | /* A DMA memory pool for LLIs, aligned to PL08X_ALIGN bytes */ | |
1990 | pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, | |
1991 | PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); | |
1992 | if (!pl08x->pool) { | |
1993 | ret = -ENOMEM; | |
1994 | goto out_no_lli_pool; | |
1995 | } | |
1996 | ||
1997 | spin_lock_init(&pl08x->lock); | |
1998 | ||
1999 | pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); | |
2000 | if (!pl08x->base) { | |
2001 | ret = -ENOMEM; | |
2002 | goto out_no_ioremap; | |
2003 | } | |
2004 | ||
2005 | /* Turn on the PL08x */ | |
2006 | pl08x_ensure_on(pl08x); | |
2007 | ||
2008 | /* Clear any pending interrupts before claiming the IRQ */ | |
2011 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | |
2012 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); | |
2013 | ||
2014 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, | |
2015 | vd->name, pl08x); | |
2016 | if (ret) { | |
2017 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", | |
2018 | __func__, adev->irq[0]); | |
2019 | goto out_no_irq; | |
2020 | } | |
2021 | ||
2022 | /* Initialize physical channels */ | |
2023 | pl08x->phy_chans = kmalloc(vd->channels * sizeof(struct pl08x_phy_chan), | |
2024 | GFP_KERNEL); | |
2025 | if (!pl08x->phy_chans) { | |
2026 | dev_err(&adev->dev, "%s failed to allocate " | |
2027 | "physical channel holders\n", | |
2028 | __func__); | |
2029 | ret = -ENOMEM; /* ret is still 0 from request_irq here */ | |
2030 | goto out_no_phychans; | |
2031 | } | |
2031 | ||
2032 | for (i = 0; i < vd->channels; i++) { | |
2033 | struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; | |
2034 | ||
2035 | ch->id = i; | |
2036 | ch->base = pl08x->base + PL080_Cx_BASE(i); | |
2037 | spin_lock_init(&ch->lock); | |
2038 | ch->serving = NULL; | |
2039 | ch->signal = -1; | |
2040 | dev_info(&adev->dev, | |
2041 | "physical channel %d is %s\n", i, | |
2042 | pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); | |
2043 | } | |
2044 | ||
2045 | /* Register as many memcpy channels as there are physical channels */ | |
2046 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, | |
2047 | pl08x->vd->channels, false); | |
2048 | if (ret <= 0) { | |
2049 | dev_warn(&pl08x->adev->dev, | |
2050 | "%s failed to enumerate memcpy channels - %d\n", | |
2051 | __func__, ret); | |
2052 | goto out_no_memcpy; | |
2053 | } | |
2054 | pl08x->memcpy.chancnt = ret; | |
2055 | ||
2056 | /* Register slave channels */ | |
2057 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, | |
2058 | pl08x->pd->num_slave_channels, | |
2059 | true); | |
2060 | if (ret <= 0) { | |
2061 | dev_warn(&pl08x->adev->dev, | |
2062 | "%s failed to enumerate slave channels - %d\n", | |
2063 | __func__, ret); | |
2064 | goto out_no_slave; | |
2065 | } | |
2066 | pl08x->slave.chancnt = ret; | |
2067 | ||
2068 | ret = dma_async_device_register(&pl08x->memcpy); | |
2069 | if (ret) { | |
2070 | dev_warn(&pl08x->adev->dev, | |
2071 | "%s failed to register memcpy as an async device - %d\n", | |
2072 | __func__, ret); | |
2073 | goto out_no_memcpy_reg; | |
2074 | } | |
2075 | ||
2076 | ret = dma_async_device_register(&pl08x->slave); | |
2077 | if (ret) { | |
2078 | dev_warn(&pl08x->adev->dev, | |
2079 | "%s failed to register slave as an async device - %d\n", | |
2080 | __func__, ret); | |
2081 | goto out_no_slave_reg; | |
2082 | } | |
2083 | ||
2084 | amba_set_drvdata(adev, pl08x); | |
2085 | init_pl08x_debugfs(pl08x); | |
2086 | dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", | |
2087 | vd->name, adev->res.start); | |
2088 | return 0; | |
2089 | ||
2090 | out_no_slave_reg: | |
2091 | dma_async_device_unregister(&pl08x->memcpy); | |
2092 | out_no_memcpy_reg: | |
2093 | pl08x_free_virtual_channels(&pl08x->slave); | |
2094 | out_no_slave: | |
2095 | pl08x_free_virtual_channels(&pl08x->memcpy); | |
2096 | out_no_memcpy: | |
2097 | kfree(pl08x->phy_chans); | |
2098 | out_no_phychans: | |
2099 | free_irq(adev->irq[0], pl08x); | |
2100 | out_no_irq: | |
2101 | iounmap(pl08x->base); | |
2102 | out_no_ioremap: | |
2103 | dma_pool_destroy(pl08x->pool); | |
2104 | out_no_lli_pool: | |
2105 | out_no_platdata: | |
2106 | kfree(pl08x); | |
2107 | out_no_pl08x: | |
2108 | amba_release_regions(adev); | |
2109 | return ret; | |
2110 | } | |
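/*
 * The error labels above unwind in the reverse order of the
 * allocations in probe() - the usual kernel idiom - so each failure
 * point releases what was set up before it.
 */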
2111 | ||
2112 | /* The PL080 has 8 channels and the PL081 has just 2 */ | |
2113 | static struct vendor_data vendor_pl080 = { | |
2114 | .name = "PL080", | |
2115 | .channels = 8, | |
2116 | .dualmaster = true, | |
2117 | }; | |
2118 | ||
2119 | static struct vendor_data vendor_pl081 = { | |
2120 | .name = "PL081", | |
2121 | .channels = 2, | |
2122 | .dualmaster = false, | |
2123 | }; | |
2124 | ||
2125 | static struct amba_id pl08x_ids[] = { | |
2126 | /* PL080 */ | |
2127 | { | |
2128 | .id = 0x00041080, | |
2129 | .mask = 0x000fffff, | |
2130 | .data = &vendor_pl080, | |
2131 | }, | |
2132 | /* PL081 */ | |
2133 | { | |
2134 | .id = 0x00041081, | |
2135 | .mask = 0x000fffff, | |
2136 | .data = &vendor_pl081, | |
2137 | }, | |
2138 | /* Nomadik 8815 PL080 variant */ | |
2139 | { | |
2140 | .id = 0x00280880, | |
2141 | .mask = 0x00ffffff, | |
2142 | .data = &vendor_pl080, | |
2143 | }, | |
2144 | { 0, 0 }, | |
2145 | }; | |
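/*
 * The AMBA bus binds when (periphid & mask) == id. A mask of
 * 0x000fffff compares the part number and designer fields but
 * ignores the revision, so any revision of a PL080/PL081 matches;
 * the Nomadik variant uses a wider mask to pin down its specific ID.
 */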
2146 | ||
2147 | static struct amba_driver pl08x_amba_driver = { | |
2148 | .drv.name = DRIVER_NAME, | |
2149 | .id_table = pl08x_ids, | |
2150 | .probe = pl08x_probe, | |
2151 | }; | |
2152 | ||
2153 | static int __init pl08x_init(void) | |
2154 | { | |
2155 | int retval; | |
2156 | retval = amba_driver_register(&pl08x_amba_driver); | |
2157 | if (retval) | |
2158 | printk(KERN_WARNING DRIVER_NAME | |
2159 | ": failed to register as an AMBA device (%d)\n", |
2160 | retval); | |
2161 | return retval; | |
2162 | } | |
2163 | subsys_initcall(pl08x_init); |
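/*
 * subsys_initcall() brings the DMAC up before the device initcalls
 * of its client peripherals. No module_exit() counterpart is wired
 * up here; a minimal sketch, assuming unloading were to be
 * supported, might look like this (illustration only):
 */
#if 0
static void __exit pl08x_exit(void)
{
	amba_driver_unregister(&pl08x_amba_driver);
}
module_exit(pl08x_exit);
#endif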