/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR		0x0204
#define DTADR		0x0208
#define DCMD		0x020c

#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	(1 << 10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	(1 << 9)	/* The end of Receive */

#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + \
				 (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

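/*
 * PDMA_ALIGNMENT is the power-of-two shift advertised as copy_align:
 * 3 means the memcpy path prefers 8-byte aligned buffers.  Less aligned
 * buffers are still handled, via the per-channel DALGN bits (see
 * enable_chan()).
 */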
#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH

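/*
 * Hardware descriptor: the four words mirror the channel's DDADR, DSADR,
 * DTADR and DCMD registers and are fetched directly by the controller,
 * so the layout is fixed; the alignment presumably matches the hardware's
 * descriptor-fetch requirement.
 */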
struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock;		/* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)

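/* Program a descriptor's address into the channel's next-descriptor
 * register (DDADR); each channel's register window is 16 bytes wide,
 * hence the "phy->idx << 4" offset.
 */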
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

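/* Map the channel's request line in DRCMR, set its byte-alignment bit in
 * DALGN to match the prepared transfer, then set DCSR_RUN to start
 * descriptor fetch.
 */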
static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN,
		phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (phy) {
		reg = (phy->idx << 2) + DCSR;
		writel(readl(phy->base + reg) & ~DCSR_RUN,
			phy->base + reg);
	}
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		/* clear irq */
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
		return 0;
	}
	return -EAGAIN;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

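/* Shared-irq handler: DINT holds one pending bit per channel, so walk the
 * set bits and demultiplex to the per-channel handler.
 */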
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/* look up a free phy channel in descending priority order */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->phy->vchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still running, the irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each descriptor carries its own submit hook */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function creates a dma pool for descriptor allocation.
 * The irq is requested only when the channel is actually requested.
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool =
		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
				sizeof(struct mmp_pdma_desc_sw),
				__alignof__(struct mmp_pdma_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}
	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

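/*
 * Clients normally reach this through dmaengine_prep_slave_sg().  A minimal
 * sketch of the client side (names are illustrative, and the channel must
 * have been configured via DMA_SLAVE_CONFIG first):
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = my_done_fn;		(hypothetical completion hook)
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */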
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

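/*
 * Channel configuration and termination are multiplexed through
 * device_control.  A sketch of the client-side slave setup (values are
 * illustrative only):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,	(device FIFO, hypothetical)
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 32,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */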
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	int ret = 0;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		mmp_pdma_free_phy(chan);
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->dev_addr = addr;
		/* FIXME: drivers should be ported over to use the filter
		 * function. Once that's done, the following two lines can
		 * be removed.
		 */
		if (cfg->slave_id)
			chan->drcmr = cfg->slave_id;
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Run the callback for each completed descriptor,
 * then start the pending list.
 */
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	/* submit pending list; callback for each desc; free desc */

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_del(&desc->node);
		list_add(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;
			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
			      int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev,
			    sizeof(struct mmp_pdma_chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq,
			mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register the virtual channel with the dma engine */
	list_add_tail(&chan->chan.device_node,
		      &pdev->device.channels);

	return 0;
}

static struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

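/*
 * A sketch of the corresponding device-tree usage, assuming the binding in
 * Documentation/devicetree/bindings/dma/mmp-dma.txt (values illustrative):
 *
 *	pdma0: dma-controller@d4000000 {
 *		compatible = "marvell,pdma-1.0";
 *		reg = <0xd4000000 0x10000>;
 *		interrupts = <0 47 0x4>;
 *		#dma-channels = <16>;
 *		#dma-cells = <1>;
 *	};
 *
 * A client referencing request line 21 via "dmas = <&pdma0 21>;" reaches
 * mmp_pdma_dma_xlate() below, which stores that cell as the channel's drcmr.
 */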
static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan, *candidate;

retry:
	candidate = NULL;

	/* walk the list of channels registered with the current instance and
	 * find one that is currently unused */
	list_for_each_entry(chan, &d->device.channels, device_node)
		if (chan->client_count == 0) {
			candidate = chan;
			break;
		}

	if (!candidate)
		return NULL;

	/* dma_get_slave_channel will return NULL if we lost a race between
	 * the lookup and the reservation */
	chan = dma_get_slave_channel(candidate);

	if (chan) {
		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
		c->drcmr = dma_spec->args[0];
		return chan;
	}

	goto retry;
}

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;
	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node,
				     "#dma-channels", &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default: 32 channels */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kzalloc(pdev->dev,
		dma_channels * sizeof(struct mmp_pdma_phy), GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all channels share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq,
			mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			return ret;
		}
	}

	/* mmp_pdma_remove() relies on this drvdata being set */
	platform_set_drvdata(op, pdev);

	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner	= THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};

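/*
 * Channel filter for clients that are not instantiated from the device
 * tree.  A minimal usage sketch (the request line number is hypothetical):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr = 21;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
 */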
bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");