/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
		int _dms = __slave ? __slave->dst_master : 0;		\
		int _sms = __slave ? __slave->src_master : 1;		\
		u8 _smsize = __slave ? _sconfig->src_maxburst :		\
			DW_DMA_MSIZE_16;				\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :		\
			DW_DMA_MSIZE_16;				\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})
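
/*
 * Note: with no slave data attached (a plain memcpy channel, where
 * chan->private is NULL), the control word above defaults to AHB
 * master 0 for the destination, master 1 for the source, and a
 * DW_DMA_MSIZE_16 burst encoding on both sides.
 */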

/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->chan.completed_cookie = txd->cookie;
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

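/*
 * Walk the active list and reap completed descriptors. The channel's
 * LLP register points at the linked-list item the controller is
 * currently fetching or working on, so every descriptor encountered
 * before an LLP match is already done and can be completed.
 */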
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptor's addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptor's llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

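	/*
	 * Example: if src, dest and len are all multiples of 8, the
	 * transfer width is 2^3 = 8 bytes, and the loop below splits the
	 * job into descriptors of at most DWC_MAX_COUNT << 3 = 32760
	 * bytes each.
	 */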
	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding most significant bit set: fls().
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
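
/*
 * Example: *maxburst == 16 gives fls(16) - 2 = 5 - 2 = 3, the register
 * encoding for a burst of 16; *maxburst <= 1 maps to 0.
 */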

static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}

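/*
 * A slave client typically reaches set_runtime_config() through the
 * generic dmaengine helper, roughly as in this illustrative sketch
 * (the FIFO address and channel are hypothetical; the channel's
 * chan->private must already carry a struct dw_dma_slave):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * convert_burst() then rewrites dst_maxburst from 16 to the register
 * encoding 3 before the value is used by DWC_DEFAULT_CTLLO().
 */
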
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	u32 cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = chan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
				dwc_first_active(dwc)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	int i;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	chan->completed_cookie = chan->cookie = 1;

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

1165 | ||
1166 | /** | |
1167 | * dw_dma_cyclic_stop - stop the cyclic DMA transfer | |
1168 | * @chan: the DMA channel to stop | |
1169 | * | |
1170 | * Must be called with soft interrupts disabled. | |
1171 | */ | |
1172 | void dw_dma_cyclic_stop(struct dma_chan *chan) | |
1173 | { | |
1174 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1175 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
69cea5a0 | 1176 | unsigned long flags; |
d9de4519 | 1177 | |
69cea5a0 | 1178 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 HCE |
1179 | |
1180 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
1181 | while (dma_readl(dw, CH_EN) & dwc->mask) | |
1182 | cpu_relax(); | |
1183 | ||
69cea5a0 | 1184 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1185 | } |
1186 | EXPORT_SYMBOL(dw_dma_cyclic_stop); | |
1187 | ||
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
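	/* (E.g. with 32-bit registers, reg_width == 2: a period may be at
	 * most DWC_MAX_COUNT << 2 == 16380 bytes, and both buf_addr and
	 * period_len must be 4-byte aligned.) */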
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
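
/*
 * Typical usage of the cyclic extensions, as an illustrative sketch
 * (my_period_cb and my_data are hypothetical client names):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_DEV_TO_MEM);
 *	cdesc->period_callback = my_period_cb;
 *	cdesc->period_callback_param = my_data;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */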

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

1398 | ||
1399 | static int __init dw_probe(struct platform_device *pdev) | |
1400 | { | |
1401 | struct dw_dma_platform_data *pdata; | |
1402 | struct resource *io; | |
1403 | struct dw_dma *dw; | |
1404 | size_t size; | |
1405 | int irq; | |
1406 | int err; | |
1407 | int i; | |
1408 | ||
6c618c9d | 1409 | pdata = dev_get_platdata(&pdev->dev); |
3bfb1d20 HS |
1410 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) |
1411 | return -EINVAL; | |
1412 | ||
1413 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1414 | if (!io) | |
1415 | return -EINVAL; | |
1416 | ||
1417 | irq = platform_get_irq(pdev, 0); | |
1418 | if (irq < 0) | |
1419 | return irq; | |
1420 | ||
1421 | size = sizeof(struct dw_dma); | |
1422 | size += pdata->nr_channels * sizeof(struct dw_dma_chan); | |
1423 | dw = kzalloc(size, GFP_KERNEL); | |
1424 | if (!dw) | |
1425 | return -ENOMEM; | |
1426 | ||
1427 | if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { | |
1428 | err = -EBUSY; | |
1429 | goto err_kfree; | |
1430 | } | |
1431 | ||
3bfb1d20 HS |
1432 | dw->regs = ioremap(io->start, DW_REGLEN); |
1433 | if (!dw->regs) { | |
1434 | err = -ENOMEM; | |
1435 | goto err_release_r; | |
1436 | } | |
1437 | ||
1438 | dw->clk = clk_get(&pdev->dev, "hclk"); | |
1439 | if (IS_ERR(dw->clk)) { | |
1440 | err = PTR_ERR(dw->clk); | |
1441 | goto err_clk; | |
1442 | } | |
1443 | clk_enable(dw->clk); | |
1444 | ||
1445 | /* force dma off, just in case */ | |
1446 | dw_dma_off(dw); | |
1447 | ||
1448 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); | |
1449 | if (err) | |
1450 | goto err_irq; | |
1451 | ||
1452 | platform_set_drvdata(pdev, dw); | |
1453 | ||
1454 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | |
1455 | ||
1456 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | |
1457 | ||
1458 | INIT_LIST_HEAD(&dw->dma.channels); | |
46389470 | 1459 | for (i = 0; i < pdata->nr_channels; i++) { |
3bfb1d20 HS |
1460 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1461 | ||
1462 | dwc->chan.device = &dw->dma; | |
4d4e58de | 1463 | dwc->chan.cookie = dwc->chan.completed_cookie = 1; |
b0c3130d VK |
1464 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1465 | list_add_tail(&dwc->chan.device_node, | |
1466 | &dw->dma.channels); | |
1467 | else | |
1468 | list_add(&dwc->chan.device_node, &dw->dma.channels); | |
3bfb1d20 | 1469 | |
93317e8e VK |
1470 | /* 7 is highest priority & 0 is lowest. */ |
1471 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | |
e8d9f875 | 1472 | dwc->priority = pdata->nr_channels - i - 1; |
93317e8e VK |
1473 | else |
1474 | dwc->priority = i; | |
1475 | ||
3bfb1d20 HS |
1476 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1477 | spin_lock_init(&dwc->lock); | |
1478 | dwc->mask = 1 << i; | |
1479 | ||
1480 | INIT_LIST_HEAD(&dwc->active_list); | |
1481 | INIT_LIST_HEAD(&dwc->queue); | |
1482 | INIT_LIST_HEAD(&dwc->free_list); | |
1483 | ||
1484 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
1485 | } | |
1486 | ||
1487 | /* Clear/disable all interrupts on all channels. */ | |
1488 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); | |
3bfb1d20 HS |
1489 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1490 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | |
1491 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | |
1492 | ||
1493 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | |
3bfb1d20 HS |
1494 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1495 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | |
1496 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | |
1497 | ||
1498 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | |
1499 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | |
95ea759e JI |
1500 | if (pdata->is_private) |
1501 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | |
3bfb1d20 HS |
1502 | dw->dma.dev = &pdev->dev; |
1503 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | |
1504 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | |
1505 | ||
1506 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | |
1507 | ||
1508 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; | |
c3635c78 | 1509 | dw->dma.device_control = dwc_control; |
3bfb1d20 | 1510 | |
07934481 | 1511 | dw->dma.device_tx_status = dwc_tx_status; |
3bfb1d20 HS |
1512 | dw->dma.device_issue_pending = dwc_issue_pending; |
1513 | ||
1514 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | |
1515 | ||
1516 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", | |
46389470 | 1517 | dev_name(&pdev->dev), pdata->nr_channels); |
3bfb1d20 HS |
1518 | |
1519 | dma_async_device_register(&dw->dma); | |
1520 | ||
1521 | return 0; | |
1522 | ||
1523 | err_irq: | |
1524 | clk_disable(dw->clk); | |
1525 | clk_put(dw->clk); | |
1526 | err_clk: | |
1527 | iounmap(dw->regs); | |
1528 | dw->regs = NULL; | |
1529 | err_release_r: | |
1530 | release_resource(io); | |
1531 | err_kfree: | |
1532 | kfree(dw); | |
1533 | return err; | |
1534 | } | |
1535 | ||
static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;
	struct resource *io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");