/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(private) ({				\
		struct dw_dma_slave *__slave = (private);	\
		int dms = __slave ? __slave->dst_master : 0;	\
		int sms = __slave ? __slave->src_master : 1;	\
		u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
		u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
								\
		(DWC_CTLL_DST_MSIZE(dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(dms)				\
		 | DWC_CTLL_SMS(sms));				\
	})
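
/*
 * What the defaults above amount to (read off the macro itself, not the
 * databook): without slave data the destination sits on AHB master 0
 * and the source on master 1, bursts (msize) are 16 data items each
 * way, and LLP block chaining is enabled on both sides.  The prep
 * functions below OR in the transfer widths, address-increment mode and
 * flow control per transfer.
 */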

/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64
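
/*
 * Back-of-the-envelope (illustrative, not a hardware limit): with
 * 32-bit transfers each descriptor moves up to 4095 * 4 = 16380 bytes,
 * so one channel's full pool of 64 descriptors covers about 1 MiB
 * (16380 * 64 = 1048320 bytes) in a single chained transfer.
 */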

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
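
/*
 * Descriptors cycle between free_list, queue and active_list; a
 * completed descriptor only becomes reusable once the client has ACKed
 * it (the async_tx_test_ack() check above), so a client that never
 * acks can exhaust the pool even though its transfers finished long ago.
 */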

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
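
/*
 * Cookies increase monotonically and wrap back to 1 on overflow; the
 * dmaengine core reserves negative cookie values for error codes, which
 * is why zero and negative values are skipped here.
 */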

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}
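
/*
 * Channel configuration sticks until the channel is torn down: the
 * initialized flag is cleared again in dwc_free_chan_resources() and in
 * dw_dma_off(), so CFG_LO/CFG_HI are written only once per allocation.
 */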

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
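
/*
 * Only the LLP-enable bits go into CTL_LO here: in block-chaining mode
 * the controller fetches a fresh control word from each linked-list
 * item, so the real CTL for every block lives in that descriptor's
 * lli.ctllo (built from DWC_DEFAULT_CTLLO() by the prep functions).
 */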

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->completed = txd->cookie;
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}
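
/*
 * The scan works by hardware bookkeeping alone: the channel's current
 * LLP register names the linked-list item being (or about to be)
 * fetched, so any active descriptor whose physical address or llp
 * pointer matches it is still in flight, and everything listed before
 * that point is complete.
 */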

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */
526 | ||
3bfb1d20 HS |
527 | static void dw_dma_tasklet(unsigned long data) |
528 | { | |
529 | struct dw_dma *dw = (struct dw_dma *)data; | |
530 | struct dw_dma_chan *dwc; | |
3bfb1d20 HS |
531 | u32 status_xfer; |
532 | u32 status_err; | |
533 | int i; | |
534 | ||
7fe7b2f4 | 535 | status_xfer = dma_readl(dw, RAW.XFER); |
3bfb1d20 HS |
536 | status_err = dma_readl(dw, RAW.ERROR); |
537 | ||
ff7b05f2 | 538 | dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err); |
3bfb1d20 HS |
539 | |
540 | for (i = 0; i < dw->dma.chancnt; i++) { | |
541 | dwc = &dw->chan[i]; | |
d9de4519 | 542 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
ff7b05f2 | 543 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); |
d9de4519 | 544 | else if (status_err & (1 << i)) |
3bfb1d20 | 545 | dwc_handle_error(dw, dwc); |
ff7b05f2 | 546 | else if (status_xfer & (1 << i)) |
3bfb1d20 | 547 | dwc_scan_descriptors(dw, dwc); |
3bfb1d20 HS |
548 | } |
549 | ||
550 | /* | |
ff7b05f2 | 551 | * Re-enable interrupts. |
3bfb1d20 HS |
552 | */ |
553 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); | |
3bfb1d20 HS |
554 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); |
555 | } | |

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}
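
/*
 * Interrupt handling is split bottom-half style: the hard IRQ handler
 * above only masks the per-channel interrupt sources and schedules the
 * tasklet; dw_dma_tasklet() then reads the RAW status, dispatches each
 * channel to the cyclic, error or scan path, and unmasks again.
 */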

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}
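
/*
 * Two lists drive scheduling: if the channel is idle the new descriptor
 * chain goes straight onto active_list and the hardware is kicked;
 * otherwise it waits on queue until dwc_complete_all() or
 * dwc_scan_descriptors() promotes it.
 */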

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan->private)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
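
/*
 * A minimal client-side sketch of driving this through the dmaengine
 * API of the same era (chan, d_src, d_dst and done() are assumed to be
 * set up by the caller; error handling omitted):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, d_dst, d_src,
 *						   len, DMA_PREP_INTERRUPT);
 *	tx->callback = done;
 *	tx->callback_param = NULL;
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */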

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC(dws->fc));
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC(dws->fc));

		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
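
/*
 * Worked example of the splitting above: a word-aligned 64 KiB segment
 * going to the device gets mem_width = 2, i.e. 16384 words, which
 * exceeds DWC_MAX_COUNT (4095), so it is cut into four LLIs of 4095
 * words each plus a fifth carrying the remaining 4 words.
 */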

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	u32 cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else
		return -ENXIO;

	return 0;
}
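
/*
 * DMA_PAUSE suspends the channel via CFG_LO and then spins until the
 * channel FIFO drains (DWC_CFGL_FIFO_EMPTY), so a paused channel holds
 * no in-flight data; DMA_TERMINATE_ALL instead disables the channel
 * outright and recycles every descriptor without running callbacks.
 */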

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
				dwc_first_active(dwc)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}
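
/*
 * Residue reporting is coarse: for an incomplete transfer the whole
 * length of the first active descriptor chain is reported, not the
 * number of bytes actually left in the hardware.
 */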

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	int i;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	struct dw_dma_slave *dws = chan->private;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);
	reg_width = dws->reg_width;
	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = dws->tx_reg;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = dws->rx_reg;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
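
/*
 * A minimal sketch of the cyclic API for an audio-style ring buffer
 * (buf, buf_len and period_len assumed to be set up by the caller;
 * period_done() and my_dev are hypothetical client code; error handling
 * omitted):
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	cdesc->period_callback = period_done;
 *	cdesc->period_callback_param = my_dev;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */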

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	int irq;
	int err;
	int i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->completed = 1;
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}
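
/*
 * Worked example of the priority assignment above (assuming an
 * 8-channel configuration): with CHAN_PRIORITY_ASCENDING, channel 0
 * gets priority 7 (the highest) and channel 7 gets priority 0;
 * otherwise each channel simply uses its own index as its priority.
 */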

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;
	struct resource *io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");