Commit | Line | Data |
---|---|---|
3bfb1d20 HS |
1 | /* |
2 | * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on | |
3 | * AVR32 systems). | |
4 | * | |
5 | * Copyright (C) 2007-2008 Atmel Corporation | |
aecb7b64 | 6 | * Copyright (C) 2010-2011 ST Microelectronics |
3bfb1d20 HS |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
327e6970 | 12 | #include <linux/bitops.h> |
3bfb1d20 HS |
13 | #include <linux/clk.h> |
14 | #include <linux/delay.h> | |
15 | #include <linux/dmaengine.h> | |
16 | #include <linux/dma-mapping.h> | |
17 | #include <linux/init.h> | |
18 | #include <linux/interrupt.h> | |
19 | #include <linux/io.h> | |
d3f797d9 | 20 | #include <linux/of.h> |
3bfb1d20 HS |
21 | #include <linux/mm.h> |
22 | #include <linux/module.h> | |
23 | #include <linux/platform_device.h> | |
24 | #include <linux/slab.h> | |
25 | ||
26 | #include "dw_dmac_regs.h" | |
d2ebfb33 | 27 | #include "dmaengine.h" |
3bfb1d20 HS |
28 | |
29 | /* | |
30 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", | |
31 | * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all | |
32 | * of which use ARM any more). See the "Databook" from Synopsys for | |
33 | * information beyond what licensees probably provide. | |
34 | * | |
35 | * The driver has currently been tested only with the Atmel AT32AP7000, | |
36 | * which does not support descriptor writeback. | |
37 | */ | |
38 | ||
327e6970 VK |
39 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ |
40 | struct dw_dma_slave *__slave = (_chan->private); \ | |
41 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ | |
42 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ | |
43 | int _dms = __slave ? __slave->dst_master : 0; \ | |
44 | int _sms = __slave ? __slave->src_master : 1; \ | |
45 | u8 _smsize = __slave ? _sconfig->src_maxburst : \ | |
46 | DW_DMA_MSIZE_16; \ | |
47 | u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ | |
48 | DW_DMA_MSIZE_16; \ | |
f301c062 | 49 | \ |
327e6970 VK |
50 | (DWC_CTLL_DST_MSIZE(_dmsize) \ |
51 | | DWC_CTLL_SRC_MSIZE(_smsize) \ | |
f301c062 JI |
52 | | DWC_CTLL_LLP_D_EN \ |
53 | | DWC_CTLL_LLP_S_EN \ | |
327e6970 VK |
54 | | DWC_CTLL_DMS(_dms) \ |
55 | | DWC_CTLL_SMS(_sms)); \ | |
f301c062 | 56 | }) |
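/*
 * Note: with no channel-private slave data the macro falls back to
 * memory masters 0 (dst) and 1 (src) and a burst size of 16; slave
 * channels take their burst sizes from dma_slave_config instead.
 */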
3bfb1d20 | 57 | |
3bfb1d20 HS |
58 | /* |
59 | * Number of descriptors to allocate for each channel. This should be | |
60 | * made configurable somehow; preferably, the clients (at least the | |
61 | * ones using slave transfers) should be able to give us a hint. | |
62 | */ | |
63 | #define NR_DESCS_PER_CHANNEL 64 | |
64 | ||
65 | /*----------------------------------------------------------------------*/ | |
66 | ||
67 | /* | |
68 | * Because we're not relying on writeback from the controller (it may not | |
69 | * even be configured into the core!) we don't need to use dma_pool. These | |
70 | * descriptors -- and associated data -- are cacheable. We do need to make | |
71 | * sure their dcache entries are written back before handing them off to | |
72 | * the controller, though. | |
73 | */ | |
74 | ||
41d5e59c DW |
75 | static struct device *chan2dev(struct dma_chan *chan) |
76 | { | |
77 | return &chan->dev->device; | |
78 | } | |
79 | static struct device *chan2parent(struct dma_chan *chan) | |
80 | { | |
81 | return chan->dev->device.parent; | |
82 | } | |
83 | ||
3bfb1d20 HS |
84 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) |
85 | { | |
86 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); | |
87 | } | |
88 | ||
3bfb1d20 HS |
89 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) |
90 | { | |
91 | struct dw_desc *desc, *_desc; | |
92 | struct dw_desc *ret = NULL; | |
93 | unsigned int i = 0; | |
69cea5a0 | 94 | unsigned long flags; |
3bfb1d20 | 95 | |
69cea5a0 | 96 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 | 97 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { |
2ab37276 | 98 | i++; |
3bfb1d20 HS |
99 | if (async_tx_test_ack(&desc->txd)) { |
100 | list_del(&desc->desc_node); | |
101 | ret = desc; | |
102 | break; | |
103 | } | |
41d5e59c | 104 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); |
3bfb1d20 | 105 | } |
69cea5a0 | 106 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 107 | |
41d5e59c | 108 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); |
3bfb1d20 HS |
109 | |
110 | return ret; | |
111 | } | |
112 | ||
113 | static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) | |
114 | { | |
115 | struct dw_desc *child; | |
116 | ||
e0bd0f8c | 117 | list_for_each_entry(child, &desc->tx_list, desc_node) |
41d5e59c | 118 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
3bfb1d20 HS |
119 | child->txd.phys, sizeof(child->lli), |
120 | DMA_TO_DEVICE); | |
41d5e59c | 121 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
3bfb1d20 HS |
122 | desc->txd.phys, sizeof(desc->lli), |
123 | DMA_TO_DEVICE); | |
124 | } | |
125 | ||
126 | /* | |
127 | * Move a descriptor, including any children, to the free list. | |
128 | * `desc' must not be on any lists. | |
129 | */ | |
130 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |
131 | { | |
69cea5a0 VK |
132 | unsigned long flags; |
133 | ||
3bfb1d20 HS |
134 | if (desc) { |
135 | struct dw_desc *child; | |
136 | ||
137 | dwc_sync_desc_for_cpu(dwc, desc); | |
138 | ||
69cea5a0 | 139 | spin_lock_irqsave(&dwc->lock, flags); |
e0bd0f8c | 140 | list_for_each_entry(child, &desc->tx_list, desc_node) |
41d5e59c | 141 | dev_vdbg(chan2dev(&dwc->chan), |
3bfb1d20 HS |
142 | "moving child desc %p to freelist\n", |
143 | child); | |
e0bd0f8c | 144 | list_splice_init(&desc->tx_list, &dwc->free_list); |
41d5e59c | 145 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); |
3bfb1d20 | 146 | list_add(&desc->desc_node, &dwc->free_list); |
69cea5a0 | 147 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
148 | } |
149 | } | |
150 | ||
61e183f8 VK |
151 | static void dwc_initialize(struct dw_dma_chan *dwc) |
152 | { | |
153 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
154 | struct dw_dma_slave *dws = dwc->chan.private; | |
155 | u32 cfghi = DWC_CFGH_FIFO_MODE; | |
156 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); | |
157 | ||
158 | if (dwc->initialized == true) | |
159 | return; | |
160 | ||
161 | if (dws) { | |
162 | /* | |
163 | * We need controller-specific data to set up slave | |
164 | * transfers. | |
165 | */ | |
166 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); | |
167 | ||
168 | cfghi = dws->cfg_hi; | |
169 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; | |
8fccc5bf AS |
170 | } else { |
171 | if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) | |
172 | cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); | |
173 | else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) | |
174 | cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); | |
61e183f8 VK |
175 | } |
176 | ||
177 | channel_writel(dwc, CFG_LO, cfglo); | |
178 | channel_writel(dwc, CFG_HI, cfghi); | |
179 | ||
180 | /* Enable interrupts */ | |
181 | channel_set_bit(dw, MASK.XFER, dwc->mask); | |
61e183f8 VK |
182 | channel_set_bit(dw, MASK.ERROR, dwc->mask); |
183 | ||
184 | dwc->initialized = true; | |
185 | } | |
186 | ||
3bfb1d20 HS |
187 | /*----------------------------------------------------------------------*/ |
188 | ||
4c2d56c5 AS |
189 | static inline unsigned int dwc_fast_fls(unsigned long long v) |
190 | { | |
191 | /* | |
192 | * Return log2 of the largest transfer width (in bytes, up to 8) |
193 | * that v is aligned to; simple, but it covers the common cases. |
194 | */ | |
195 | if (!(v & 7)) | |
196 | return 3; | |
197 | else if (!(v & 3)) | |
198 | return 2; | |
199 | else if (!(v & 1)) | |
200 | return 1; | |
201 | return 0; | |
202 | } | |
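/*
 * Example: src = 0x1000, dest = 0x2000, len = 0x100 gives
 * src | dest | len = 0x3100; the low three bits are clear, so
 * dwc_fast_fls() returns 3 and 8-byte (1 << 3) transfers can be used.
 */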
203 | ||
f52b36d2 | 204 | static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) |
1d455437 AS |
205 | { |
206 | dev_err(chan2dev(&dwc->chan), | |
207 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | |
208 | channel_readl(dwc, SAR), | |
209 | channel_readl(dwc, DAR), | |
210 | channel_readl(dwc, LLP), | |
211 | channel_readl(dwc, CTL_HI), | |
212 | channel_readl(dwc, CTL_LO)); | |
213 | } | |
214 | ||
3f936207 AS |
215 | |
216 | static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
217 | { | |
218 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
219 | while (dma_readl(dw, CH_EN) & dwc->mask) | |
220 | cpu_relax(); | |
221 | } | |
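/*
 * dwc_chan_disable() spins until the hardware acknowledges the clear
 * of CH_EN, so callers may safely reprogram the channel afterwards.
 */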
222 | ||
1d455437 AS |
223 | /*----------------------------------------------------------------------*/ |
224 | ||
3bfb1d20 HS |
225 | /* Called with dwc->lock held and bh disabled */ |
226 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |
227 | { | |
228 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
229 | ||
230 | /* ASSERT: channel is idle */ | |
231 | if (dma_readl(dw, CH_EN) & dwc->mask) { | |
41d5e59c | 232 | dev_err(chan2dev(&dwc->chan), |
3bfb1d20 | 233 | "BUG: Attempted to start non-idle channel\n"); |
1d455437 | 234 | dwc_dump_chan_regs(dwc); |
3bfb1d20 HS |
235 | |
236 | /* The tasklet will hopefully advance the queue... */ | |
237 | return; | |
238 | } | |
239 | ||
61e183f8 VK |
240 | dwc_initialize(dwc); |
241 | ||
3bfb1d20 HS |
242 | channel_writel(dwc, LLP, first->txd.phys); |
243 | channel_writel(dwc, CTL_LO, | |
244 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | |
245 | channel_writel(dwc, CTL_HI, 0); | |
246 | channel_set_bit(dw, CH_EN, dwc->mask); | |
247 | } | |
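/*
 * Note that dwc_dostart() only loads LLP with the first descriptor
 * and enables block chaining in CTL_LO; the controller then fetches
 * addresses, widths and block sizes from the LLI chain itself.
 */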
248 | ||
249 | /*----------------------------------------------------------------------*/ | |
250 | ||
251 | static void | |
5fedefb8 VK |
252 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, |
253 | bool callback_required) | |
3bfb1d20 | 254 | { |
5fedefb8 VK |
255 | dma_async_tx_callback callback = NULL; |
256 | void *param = NULL; | |
3bfb1d20 | 257 | struct dma_async_tx_descriptor *txd = &desc->txd; |
e518076e | 258 | struct dw_desc *child; |
69cea5a0 | 259 | unsigned long flags; |
3bfb1d20 | 260 | |
41d5e59c | 261 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
3bfb1d20 | 262 | |
69cea5a0 | 263 | spin_lock_irqsave(&dwc->lock, flags); |
f7fbce07 | 264 | dma_cookie_complete(txd); |
5fedefb8 VK |
265 | if (callback_required) { |
266 | callback = txd->callback; | |
267 | param = txd->callback_param; | |
268 | } | |
3bfb1d20 HS |
269 | |
270 | dwc_sync_desc_for_cpu(dwc, desc); | |
e518076e VK |
271 | |
272 | /* async_tx_ack */ | |
273 | list_for_each_entry(child, &desc->tx_list, desc_node) | |
274 | async_tx_ack(&child->txd); | |
275 | async_tx_ack(&desc->txd); | |
276 | ||
e0bd0f8c | 277 | list_splice_init(&desc->tx_list, &dwc->free_list); |
3bfb1d20 HS |
278 | list_move(&desc->desc_node, &dwc->free_list); |
279 | ||
657a77fa AN |
280 | if (!dwc->chan.private) { |
281 | struct device *parent = chan2parent(&dwc->chan); | |
282 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | |
283 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | |
284 | dma_unmap_single(parent, desc->lli.dar, | |
285 | desc->len, DMA_FROM_DEVICE); | |
286 | else | |
287 | dma_unmap_page(parent, desc->lli.dar, | |
288 | desc->len, DMA_FROM_DEVICE); | |
289 | } | |
290 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | |
291 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | |
292 | dma_unmap_single(parent, desc->lli.sar, | |
293 | desc->len, DMA_TO_DEVICE); | |
294 | else | |
295 | dma_unmap_page(parent, desc->lli.sar, | |
296 | desc->len, DMA_TO_DEVICE); | |
297 | } | |
298 | } | |
3bfb1d20 | 299 | |
69cea5a0 VK |
300 | spin_unlock_irqrestore(&dwc->lock, flags); |
301 | ||
5fedefb8 | 302 | if (callback_required && callback) |
3bfb1d20 HS |
303 | callback(param); |
304 | } | |
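/*
 * The client callback above is invoked with dwc->lock dropped, so it
 * may submit new descriptors on this channel without deadlocking.
 */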
305 | ||
306 | static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
307 | { | |
308 | struct dw_desc *desc, *_desc; | |
309 | LIST_HEAD(list); | |
69cea5a0 | 310 | unsigned long flags; |
3bfb1d20 | 311 | |
69cea5a0 | 312 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 | 313 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
41d5e59c | 314 | dev_err(chan2dev(&dwc->chan), |
3bfb1d20 HS |
315 | "BUG: XFER bit set, but channel not idle!\n"); |
316 | ||
317 | /* Try to continue after resetting the channel... */ | |
3f936207 | 318 | dwc_chan_disable(dw, dwc); |
3bfb1d20 HS |
319 | } |
320 | ||
321 | /* | |
322 | * Submit queued descriptors ASAP, i.e. before we go through | |
323 | * the completed ones. | |
324 | */ | |
3bfb1d20 | 325 | list_splice_init(&dwc->active_list, &list); |
f336e42f VK |
326 | if (!list_empty(&dwc->queue)) { |
327 | list_move(dwc->queue.next, &dwc->active_list); | |
328 | dwc_dostart(dwc, dwc_first_active(dwc)); | |
329 | } | |
3bfb1d20 | 330 | |
69cea5a0 VK |
331 | spin_unlock_irqrestore(&dwc->lock, flags); |
332 | ||
3bfb1d20 | 333 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
5fedefb8 | 334 | dwc_descriptor_complete(dwc, desc, true); |
3bfb1d20 HS |
335 | } |
336 | ||
337 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
338 | { | |
339 | dma_addr_t llp; | |
340 | struct dw_desc *desc, *_desc; | |
341 | struct dw_desc *child; | |
342 | u32 status_xfer; | |
69cea5a0 | 343 | unsigned long flags; |
3bfb1d20 | 344 | |
69cea5a0 | 345 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
346 | llp = channel_readl(dwc, LLP); |
347 | status_xfer = dma_readl(dw, RAW.XFER); | |
348 | ||
349 | if (status_xfer & dwc->mask) { | |
350 | /* Everything we've submitted is done */ | |
351 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
69cea5a0 VK |
352 | spin_unlock_irqrestore(&dwc->lock, flags); |
353 | ||
3bfb1d20 HS |
354 | dwc_complete_all(dw, dwc); |
355 | return; | |
356 | } | |
357 | ||
69cea5a0 VK |
358 | if (list_empty(&dwc->active_list)) { |
359 | spin_unlock_irqrestore(&dwc->lock, flags); | |
087809fc | 360 | return; |
69cea5a0 | 361 | } |
087809fc | 362 | |
2e4c364e | 363 | dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, |
2f45d613 | 364 | (unsigned long long)llp); |
3bfb1d20 HS |
365 | |
366 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | |
84adccfb | 367 | /* Check the first descriptor's address */ |
69cea5a0 VK |
368 | if (desc->txd.phys == llp) { |
369 | spin_unlock_irqrestore(&dwc->lock, flags); | |
84adccfb | 370 | return; |
69cea5a0 | 371 | } |
84adccfb VK |
372 | |
373 | /* Check the first descriptor's llp */ |
69cea5a0 | 374 | if (desc->lli.llp == llp) { |
3bfb1d20 | 375 | /* This one is currently in progress */ |
69cea5a0 | 376 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 377 | return; |
69cea5a0 | 378 | } |
3bfb1d20 | 379 | |
e0bd0f8c | 380 | list_for_each_entry(child, &desc->tx_list, desc_node) |
69cea5a0 | 381 | if (child->lli.llp == llp) { |
3bfb1d20 | 382 | /* Currently in progress */ |
69cea5a0 | 383 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 384 | return; |
69cea5a0 | 385 | } |
3bfb1d20 HS |
386 | |
387 | /* | |
388 | * No descriptors so far seem to be in progress, i.e. | |
389 | * this one must be done. | |
390 | */ | |
69cea5a0 | 391 | spin_unlock_irqrestore(&dwc->lock, flags); |
5fedefb8 | 392 | dwc_descriptor_complete(dwc, desc, true); |
69cea5a0 | 393 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
394 | } |
395 | ||
41d5e59c | 396 | dev_err(chan2dev(&dwc->chan), |
3bfb1d20 HS |
397 | "BUG: All descriptors done, but channel not idle!\n"); |
398 | ||
399 | /* Try to continue after resetting the channel... */ | |
3f936207 | 400 | dwc_chan_disable(dw, dwc); |
3bfb1d20 HS |
401 | |
402 | if (!list_empty(&dwc->queue)) { | |
f336e42f VK |
403 | list_move(dwc->queue.next, &dwc->active_list); |
404 | dwc_dostart(dwc, dwc_first_active(dwc)); | |
3bfb1d20 | 405 | } |
69cea5a0 | 406 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
407 | } |
408 | ||
93aad1bc | 409 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
3bfb1d20 | 410 | { |
41d5e59c | 411 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
3bfb1d20 | 412 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
f8609c2b | 413 | lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); |
3bfb1d20 HS |
414 | } |
415 | ||
416 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
417 | { | |
418 | struct dw_desc *bad_desc; | |
419 | struct dw_desc *child; | |
69cea5a0 | 420 | unsigned long flags; |
3bfb1d20 HS |
421 | |
422 | dwc_scan_descriptors(dw, dwc); | |
423 | ||
69cea5a0 VK |
424 | spin_lock_irqsave(&dwc->lock, flags); |
425 | ||
3bfb1d20 HS |
426 | /* |
427 | * The descriptor currently at the head of the active list is | |
428 | * borked. Since we don't have any way to report errors, we'll | |
429 | * just have to scream loudly and try to carry on. | |
430 | */ | |
431 | bad_desc = dwc_first_active(dwc); | |
432 | list_del_init(&bad_desc->desc_node); | |
f336e42f | 433 | list_move(dwc->queue.next, dwc->active_list.prev); |
3bfb1d20 HS |
434 | |
435 | /* Clear the error flag and try to restart the controller */ | |
436 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | |
437 | if (!list_empty(&dwc->active_list)) | |
438 | dwc_dostart(dwc, dwc_first_active(dwc)); | |
439 | ||
440 | /* | |
441 | * KERN_CRIT may seem harsh, but since this only happens |
442 | * when someone submits a bad physical address in a | |
443 | * descriptor, we should consider ourselves lucky that the | |
444 | * controller flagged an error instead of scribbling over | |
445 | * random memory locations. | |
446 | */ | |
41d5e59c | 447 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
3bfb1d20 | 448 | "Bad descriptor submitted for DMA!\n"); |
41d5e59c | 449 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
3bfb1d20 HS |
450 | " cookie: %d\n", bad_desc->txd.cookie); |
451 | dwc_dump_lli(dwc, &bad_desc->lli); | |
e0bd0f8c | 452 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
3bfb1d20 HS |
453 | dwc_dump_lli(dwc, &child->lli); |
454 | ||
69cea5a0 VK |
455 | spin_unlock_irqrestore(&dwc->lock, flags); |
456 | ||
3bfb1d20 | 457 | /* Pretend the descriptor completed successfully */ |
5fedefb8 | 458 | dwc_descriptor_complete(dwc, bad_desc, true); |
3bfb1d20 HS |
459 | } |
460 | ||
d9de4519 HCE |
461 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
462 | ||
463 | inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) | |
464 | { | |
465 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
466 | return channel_readl(dwc, SAR); | |
467 | } | |
468 | EXPORT_SYMBOL(dw_dma_get_src_addr); | |
469 | ||
470 | inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) | |
471 | { | |
472 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
473 | return channel_readl(dwc, DAR); | |
474 | } | |
475 | EXPORT_SYMBOL(dw_dma_get_dst_addr); | |
476 | ||
477 | /* called with dwc->lock held and all DMAC interrupts disabled */ | |
478 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |
ff7b05f2 | 479 | u32 status_err, u32 status_xfer) |
d9de4519 | 480 | { |
69cea5a0 VK |
481 | unsigned long flags; |
482 | ||
ff7b05f2 | 483 | if (dwc->mask) { |
d9de4519 HCE |
484 | void (*callback)(void *param); |
485 | void *callback_param; | |
486 | ||
487 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", | |
488 | channel_readl(dwc, LLP)); | |
d9de4519 HCE |
489 | |
490 | callback = dwc->cdesc->period_callback; | |
491 | callback_param = dwc->cdesc->period_callback_param; | |
69cea5a0 VK |
492 | |
493 | if (callback) | |
d9de4519 | 494 | callback(callback_param); |
d9de4519 HCE |
495 | } |
496 | ||
497 | /* | |
498 | * Error and transfer complete are highly unlikely, and will most | |
499 | * likely be due to a configuration error by the user. | |
500 | */ | |
501 | if (unlikely(status_err & dwc->mask) || | |
502 | unlikely(status_xfer & dwc->mask)) { | |
503 | int i; | |
504 | ||
505 | dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " | |
506 | "interrupt, stopping DMA transfer\n", | |
507 | status_xfer ? "xfer" : "error"); | |
69cea5a0 VK |
508 | |
509 | spin_lock_irqsave(&dwc->lock, flags); | |
510 | ||
1d455437 | 511 | dwc_dump_chan_regs(dwc); |
d9de4519 | 512 | |
3f936207 | 513 | dwc_chan_disable(dw, dwc); |
d9de4519 HCE |
514 | |
515 | /* make sure DMA does not restart by loading a new list */ | |
516 | channel_writel(dwc, LLP, 0); | |
517 | channel_writel(dwc, CTL_LO, 0); | |
518 | channel_writel(dwc, CTL_HI, 0); | |
519 | ||
d9de4519 HCE |
520 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
521 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
522 | ||
523 | for (i = 0; i < dwc->cdesc->periods; i++) | |
524 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); | |
69cea5a0 VK |
525 | |
526 | spin_unlock_irqrestore(&dwc->lock, flags); | |
d9de4519 HCE |
527 | } |
528 | } | |
529 | ||
530 | /* ------------------------------------------------------------------------- */ | |
531 | ||
3bfb1d20 HS |
532 | static void dw_dma_tasklet(unsigned long data) |
533 | { | |
534 | struct dw_dma *dw = (struct dw_dma *)data; | |
535 | struct dw_dma_chan *dwc; | |
3bfb1d20 HS |
536 | u32 status_xfer; |
537 | u32 status_err; | |
538 | int i; | |
539 | ||
7fe7b2f4 | 540 | status_xfer = dma_readl(dw, RAW.XFER); |
3bfb1d20 HS |
541 | status_err = dma_readl(dw, RAW.ERROR); |
542 | ||
2e4c364e | 543 | dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); |
3bfb1d20 HS |
544 | |
545 | for (i = 0; i < dw->dma.chancnt; i++) { | |
546 | dwc = &dw->chan[i]; | |
d9de4519 | 547 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
ff7b05f2 | 548 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); |
d9de4519 | 549 | else if (status_err & (1 << i)) |
3bfb1d20 | 550 | dwc_handle_error(dw, dwc); |
ff7b05f2 | 551 | else if (status_xfer & (1 << i)) |
3bfb1d20 | 552 | dwc_scan_descriptors(dw, dwc); |
3bfb1d20 HS |
553 | } |
554 | ||
555 | /* | |
ff7b05f2 | 556 | * Re-enable interrupts. |
3bfb1d20 HS |
557 | */ |
558 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); | |
3bfb1d20 HS |
559 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); |
560 | } | |
561 | ||
562 | static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |
563 | { | |
564 | struct dw_dma *dw = dev_id; | |
565 | u32 status; | |
566 | ||
2e4c364e | 567 | dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, |
3bfb1d20 HS |
568 | dma_readl(dw, STATUS_INT)); |
569 | ||
570 | /* | |
571 | * Just disable the interrupts. We'll turn them back on in the | |
572 | * softirq handler. | |
573 | */ | |
574 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | |
3bfb1d20 HS |
575 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
576 | ||
577 | status = dma_readl(dw, STATUS_INT); | |
578 | if (status) { | |
579 | dev_err(dw->dma.dev, | |
580 | "BUG: Unexpected interrupts pending: 0x%x\n", | |
581 | status); | |
582 | ||
583 | /* Try to recover */ | |
584 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); | |
3bfb1d20 HS |
585 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); |
586 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); | |
587 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); | |
588 | } | |
589 | ||
590 | tasklet_schedule(&dw->tasklet); | |
591 | ||
592 | return IRQ_HANDLED; | |
593 | } | |
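/*
 * Interrupt handling is split in two: the hard IRQ handler above only
 * masks the channel interrupts and schedules the tasklet, which does
 * the real work and unmasks them again when it is done.
 */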
594 | ||
595 | /*----------------------------------------------------------------------*/ | |
596 | ||
597 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |
598 | { | |
599 | struct dw_desc *desc = txd_to_dw_desc(tx); | |
600 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); | |
601 | dma_cookie_t cookie; | |
69cea5a0 | 602 | unsigned long flags; |
3bfb1d20 | 603 | |
69cea5a0 | 604 | spin_lock_irqsave(&dwc->lock, flags); |
884485e1 | 605 | cookie = dma_cookie_assign(tx); |
3bfb1d20 HS |
606 | |
607 | /* | |
608 | * REVISIT: We should attempt to chain as many descriptors as | |
609 | * possible, perhaps even appending to those already submitted | |
610 | * for DMA. But this is hard to do in a race-free manner. | |
611 | */ | |
612 | if (list_empty(&dwc->active_list)) { | |
2e4c364e | 613 | dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, |
3bfb1d20 | 614 | desc->txd.cookie); |
3bfb1d20 | 615 | list_add_tail(&desc->desc_node, &dwc->active_list); |
f336e42f | 616 | dwc_dostart(dwc, dwc_first_active(dwc)); |
3bfb1d20 | 617 | } else { |
2e4c364e | 618 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, |
3bfb1d20 HS |
619 | desc->txd.cookie); |
620 | ||
621 | list_add_tail(&desc->desc_node, &dwc->queue); | |
622 | } | |
623 | ||
69cea5a0 | 624 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
625 | |
626 | return cookie; | |
627 | } | |
628 | ||
629 | static struct dma_async_tx_descriptor * | |
630 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |
631 | size_t len, unsigned long flags) | |
632 | { | |
633 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
634 | struct dw_desc *desc; | |
635 | struct dw_desc *first; | |
636 | struct dw_desc *prev; | |
637 | size_t xfer_count; | |
638 | size_t offset; | |
639 | unsigned int src_width; | |
640 | unsigned int dst_width; | |
641 | u32 ctllo; | |
642 | ||
2f45d613 | 643 | dev_vdbg(chan2dev(chan), |
2e4c364e | 644 | "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, |
2f45d613 AS |
645 | (unsigned long long)dest, (unsigned long long)src, |
646 | len, flags); | |
3bfb1d20 HS |
647 | |
648 | if (unlikely(!len)) { | |
2e4c364e | 649 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); |
3bfb1d20 HS |
650 | return NULL; |
651 | } | |
652 | ||
4c2d56c5 | 653 | src_width = dst_width = dwc_fast_fls(src | dest | len); |
3bfb1d20 | 654 | |
327e6970 | 655 | ctllo = DWC_DEFAULT_CTLLO(chan) |
3bfb1d20 HS |
656 | | DWC_CTLL_DST_WIDTH(dst_width) |
657 | | DWC_CTLL_SRC_WIDTH(src_width) | |
658 | | DWC_CTLL_DST_INC | |
659 | | DWC_CTLL_SRC_INC | |
660 | | DWC_CTLL_FC_M2M; | |
661 | prev = first = NULL; | |
662 | ||
663 | for (offset = 0; offset < len; offset += xfer_count << src_width) { | |
664 | xfer_count = min_t(size_t, (len - offset) >> src_width, | |
4a63a8b3 | 665 | dwc->block_size); |
3bfb1d20 HS |
666 | |
667 | desc = dwc_desc_get(dwc); | |
668 | if (!desc) | |
669 | goto err_desc_get; | |
670 | ||
671 | desc->lli.sar = src + offset; | |
672 | desc->lli.dar = dest + offset; | |
673 | desc->lli.ctllo = ctllo; | |
674 | desc->lli.ctlhi = xfer_count; | |
675 | ||
676 | if (!first) { | |
677 | first = desc; | |
678 | } else { | |
679 | prev->lli.llp = desc->txd.phys; | |
41d5e59c | 680 | dma_sync_single_for_device(chan2parent(chan), |
3bfb1d20 HS |
681 | prev->txd.phys, sizeof(prev->lli), |
682 | DMA_TO_DEVICE); | |
683 | list_add_tail(&desc->desc_node, | |
e0bd0f8c | 684 | &first->tx_list); |
3bfb1d20 HS |
685 | } |
686 | prev = desc; | |
687 | } | |
688 | ||
689 | ||
690 | if (flags & DMA_PREP_INTERRUPT) | |
691 | /* Trigger interrupt after last block */ | |
692 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | |
693 | ||
694 | prev->lli.llp = 0; | |
41d5e59c | 695 | dma_sync_single_for_device(chan2parent(chan), |
3bfb1d20 HS |
696 | prev->txd.phys, sizeof(prev->lli), |
697 | DMA_TO_DEVICE); | |
698 | ||
699 | first->txd.flags = flags; | |
700 | first->len = len; | |
701 | ||
702 | return &first->txd; | |
703 | ||
704 | err_desc_get: | |
705 | dwc_desc_put(dwc, first); | |
706 | return NULL; | |
707 | } | |
708 | ||
709 | static struct dma_async_tx_descriptor * | |
710 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |
db8196df | 711 | unsigned int sg_len, enum dma_transfer_direction direction, |
185ecb5f | 712 | unsigned long flags, void *context) |
3bfb1d20 HS |
713 | { |
714 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
287d8592 | 715 | struct dw_dma_slave *dws = chan->private; |
327e6970 | 716 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
3bfb1d20 HS |
717 | struct dw_desc *prev; |
718 | struct dw_desc *first; | |
719 | u32 ctllo; | |
720 | dma_addr_t reg; | |
721 | unsigned int reg_width; | |
722 | unsigned int mem_width; | |
723 | unsigned int i; | |
724 | struct scatterlist *sg; | |
725 | size_t total_len = 0; | |
726 | ||
2e4c364e | 727 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
3bfb1d20 HS |
728 | |
729 | if (unlikely(!dws || !sg_len)) | |
730 | return NULL; | |
731 | ||
3bfb1d20 HS |
732 | prev = first = NULL; |
733 | ||
3bfb1d20 | 734 | switch (direction) { |
db8196df | 735 | case DMA_MEM_TO_DEV: |
327e6970 VK |
736 | reg_width = __fls(sconfig->dst_addr_width); |
737 | reg = sconfig->dst_addr; | |
738 | ctllo = (DWC_DEFAULT_CTLLO(chan) | |
3bfb1d20 HS |
739 | | DWC_CTLL_DST_WIDTH(reg_width) |
740 | | DWC_CTLL_DST_FIX | |
327e6970 VK |
741 | | DWC_CTLL_SRC_INC); |
742 | ||
743 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | |
744 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | |
745 | ||
3bfb1d20 HS |
746 | for_each_sg(sgl, sg, sg_len, i) { |
747 | struct dw_desc *desc; | |
69dc14b5 | 748 | u32 len, dlen, mem; |
3bfb1d20 | 749 | |
cbb796cc | 750 | mem = sg_dma_address(sg); |
69dc14b5 | 751 | len = sg_dma_len(sg); |
6bc711f6 | 752 | |
4c2d56c5 | 753 | mem_width = dwc_fast_fls(mem | len); |
3bfb1d20 | 754 | |
69dc14b5 | 755 | slave_sg_todev_fill_desc: |
3bfb1d20 HS |
756 | desc = dwc_desc_get(dwc); |
757 | if (!desc) { | |
41d5e59c | 758 | dev_err(chan2dev(chan), |
3bfb1d20 HS |
759 | "not enough descriptors available\n"); |
760 | goto err_desc_get; | |
761 | } | |
762 | ||
3bfb1d20 HS |
763 | desc->lli.sar = mem; |
764 | desc->lli.dar = reg; | |
765 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); | |
4a63a8b3 AS |
766 | if ((len >> mem_width) > dwc->block_size) { |
767 | dlen = dwc->block_size << mem_width; | |
69dc14b5 VK |
768 | mem += dlen; |
769 | len -= dlen; | |
770 | } else { | |
771 | dlen = len; | |
772 | len = 0; | |
773 | } | |
774 | ||
775 | desc->lli.ctlhi = dlen >> mem_width; | |
3bfb1d20 HS |
776 | |
777 | if (!first) { | |
778 | first = desc; | |
779 | } else { | |
780 | prev->lli.llp = desc->txd.phys; | |
41d5e59c | 781 | dma_sync_single_for_device(chan2parent(chan), |
3bfb1d20 HS |
782 | prev->txd.phys, |
783 | sizeof(prev->lli), | |
784 | DMA_TO_DEVICE); | |
785 | list_add_tail(&desc->desc_node, | |
e0bd0f8c | 786 | &first->tx_list); |
3bfb1d20 HS |
787 | } |
788 | prev = desc; | |
69dc14b5 VK |
789 | total_len += dlen; |
790 | ||
791 | if (len) | |
792 | goto slave_sg_todev_fill_desc; | |
3bfb1d20 HS |
793 | } |
794 | break; | |
db8196df | 795 | case DMA_DEV_TO_MEM: |
327e6970 VK |
796 | reg_width = __fls(sconfig->src_addr_width); |
797 | reg = sconfig->src_addr; | |
798 | ctllo = (DWC_DEFAULT_CTLLO(chan) | |
3bfb1d20 HS |
799 | | DWC_CTLL_SRC_WIDTH(reg_width) |
800 | | DWC_CTLL_DST_INC | |
327e6970 VK |
801 | | DWC_CTLL_SRC_FIX); |
802 | ||
803 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | |
804 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | |
3bfb1d20 | 805 | |
3bfb1d20 HS |
806 | for_each_sg(sgl, sg, sg_len, i) { |
807 | struct dw_desc *desc; | |
69dc14b5 | 808 | u32 len, dlen, mem; |
3bfb1d20 | 809 | |
cbb796cc | 810 | mem = sg_dma_address(sg); |
3bfb1d20 | 811 | len = sg_dma_len(sg); |
6bc711f6 | 812 | |
4c2d56c5 | 813 | mem_width = dwc_fast_fls(mem | len); |
3bfb1d20 | 814 | |
69dc14b5 VK |
815 | slave_sg_fromdev_fill_desc: |
816 | desc = dwc_desc_get(dwc); | |
817 | if (!desc) { | |
818 | dev_err(chan2dev(chan), | |
819 | "not enough descriptors available\n"); | |
820 | goto err_desc_get; | |
821 | } | |
822 | ||
3bfb1d20 HS |
823 | desc->lli.sar = reg; |
824 | desc->lli.dar = mem; | |
825 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); | |
4a63a8b3 AS |
826 | if ((len >> reg_width) > dwc->block_size) { |
827 | dlen = dwc->block_size << reg_width; | |
69dc14b5 VK |
828 | mem += dlen; |
829 | len -= dlen; | |
830 | } else { | |
831 | dlen = len; | |
832 | len = 0; | |
833 | } | |
834 | desc->lli.ctlhi = dlen >> reg_width; | |
3bfb1d20 HS |
835 | |
836 | if (!first) { | |
837 | first = desc; | |
838 | } else { | |
839 | prev->lli.llp = desc->txd.phys; | |
41d5e59c | 840 | dma_sync_single_for_device(chan2parent(chan), |
3bfb1d20 HS |
841 | prev->txd.phys, |
842 | sizeof(prev->lli), | |
843 | DMA_TO_DEVICE); | |
844 | list_add_tail(&desc->desc_node, | |
e0bd0f8c | 845 | &first->tx_list); |
3bfb1d20 HS |
846 | } |
847 | prev = desc; | |
69dc14b5 VK |
848 | total_len += dlen; |
849 | ||
850 | if (len) | |
851 | goto slave_sg_fromdev_fill_desc; | |
3bfb1d20 HS |
852 | } |
853 | break; | |
854 | default: | |
855 | return NULL; | |
856 | } | |
857 | ||
858 | if (flags & DMA_PREP_INTERRUPT) | |
859 | /* Trigger interrupt after last block */ | |
860 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | |
861 | ||
862 | prev->lli.llp = 0; | |
41d5e59c | 863 | dma_sync_single_for_device(chan2parent(chan), |
3bfb1d20 HS |
864 | prev->txd.phys, sizeof(prev->lli), |
865 | DMA_TO_DEVICE); | |
866 | ||
867 | first->len = total_len; | |
868 | ||
869 | return &first->txd; | |
870 | ||
871 | err_desc_get: | |
872 | dwc_desc_put(dwc, first); | |
873 | return NULL; | |
874 | } | |
875 | ||
327e6970 VK |
876 | /* |
877 | * Fix sconfig's burst size according to dw_dmac. We need to convert them as: | |
878 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | |
879 | * | |
880 | * NOTE: burst size 2 is not supported by the controller. | |
881 | * | |
882 | * This can be done by taking the most significant bit set: fls(n) - 2. | |
883 | */ | |
884 | static inline void convert_burst(u32 *maxburst) | |
885 | { | |
886 | if (*maxburst > 1) | |
887 | *maxburst = fls(*maxburst) - 2; | |
888 | else | |
889 | *maxburst = 0; | |
890 | } | |
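/*
 * Example: *maxburst == 8 becomes fls(8) - 2 == 2, the encoding the
 * hardware expects for an 8-item burst; *maxburst <= 1 maps to 0.
 */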
891 | ||
892 | static int | |
893 | set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | |
894 | { | |
895 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
896 | ||
897 | /* Check if chan is configured for slave transfers */ |
898 | if (!chan->private) | |
899 | return -EINVAL; | |
900 | ||
901 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | |
902 | ||
903 | convert_burst(&dwc->dma_sconfig.src_maxburst); | |
904 | convert_burst(&dwc->dma_sconfig.dst_maxburst); | |
905 | ||
906 | return 0; | |
907 | } | |
908 | ||
05827630 LW |
909 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
910 | unsigned long arg) | |
3bfb1d20 HS |
911 | { |
912 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
913 | struct dw_dma *dw = to_dw_dma(chan->device); | |
914 | struct dw_desc *desc, *_desc; | |
69cea5a0 | 915 | unsigned long flags; |
a7c57cf7 | 916 | u32 cfglo; |
3bfb1d20 HS |
917 | LIST_HEAD(list); |
918 | ||
a7c57cf7 LW |
919 | if (cmd == DMA_PAUSE) { |
920 | spin_lock_irqsave(&dwc->lock, flags); | |
c3635c78 | 921 | |
a7c57cf7 LW |
922 | cfglo = channel_readl(dwc, CFG_LO); |
923 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | |
924 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) | |
925 | cpu_relax(); | |
3bfb1d20 | 926 | |
a7c57cf7 LW |
927 | dwc->paused = true; |
928 | spin_unlock_irqrestore(&dwc->lock, flags); | |
929 | } else if (cmd == DMA_RESUME) { | |
930 | if (!dwc->paused) | |
931 | return 0; | |
3bfb1d20 | 932 | |
a7c57cf7 | 933 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 | 934 | |
a7c57cf7 LW |
935 | cfglo = channel_readl(dwc, CFG_LO); |
936 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | |
937 | dwc->paused = false; | |
3bfb1d20 | 938 | |
a7c57cf7 LW |
939 | spin_unlock_irqrestore(&dwc->lock, flags); |
940 | } else if (cmd == DMA_TERMINATE_ALL) { | |
941 | spin_lock_irqsave(&dwc->lock, flags); | |
3bfb1d20 | 942 | |
3f936207 | 943 | dwc_chan_disable(dw, dwc); |
a7c57cf7 LW |
944 | |
945 | dwc->paused = false; | |
946 | ||
947 | /* active_list entries will end up before queued entries */ | |
948 | list_splice_init(&dwc->queue, &list); | |
949 | list_splice_init(&dwc->active_list, &list); | |
950 | ||
951 | spin_unlock_irqrestore(&dwc->lock, flags); | |
952 | ||
953 | /* Flush all pending and queued descriptors */ | |
954 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | |
955 | dwc_descriptor_complete(dwc, desc, false); | |
327e6970 VK |
956 | } else if (cmd == DMA_SLAVE_CONFIG) { |
957 | return set_runtime_config(chan, (struct dma_slave_config *)arg); | |
958 | } else { | |
a7c57cf7 | 959 | return -ENXIO; |
327e6970 | 960 | } |
c3635c78 LW |
961 | |
962 | return 0; | |
3bfb1d20 HS |
963 | } |
964 | ||
965 | static enum dma_status | |
07934481 LW |
966 | dwc_tx_status(struct dma_chan *chan, |
967 | dma_cookie_t cookie, | |
968 | struct dma_tx_state *txstate) | |
3bfb1d20 HS |
969 | { |
970 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
96a2af41 | 971 | enum dma_status ret; |
3bfb1d20 | 972 | |
96a2af41 | 973 | ret = dma_cookie_status(chan, cookie, txstate); |
3bfb1d20 HS |
974 | if (ret != DMA_SUCCESS) { |
975 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | |
976 | ||
96a2af41 | 977 | ret = dma_cookie_status(chan, cookie, txstate); |
3bfb1d20 HS |
978 | } |
979 | ||
abf53902 | 980 | if (ret != DMA_SUCCESS) |
96a2af41 | 981 | dma_set_residue(txstate, dwc_first_active(dwc)->len); |
3bfb1d20 | 982 | |
a7c57cf7 LW |
983 | if (dwc->paused) |
984 | return DMA_PAUSED; | |
3bfb1d20 HS |
985 | |
986 | return ret; | |
987 | } | |
988 | ||
989 | static void dwc_issue_pending(struct dma_chan *chan) | |
990 | { | |
991 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
992 | ||
3bfb1d20 HS |
993 | if (!list_empty(&dwc->queue)) |
994 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | |
3bfb1d20 HS |
995 | } |
996 | ||
aa1e6f1a | 997 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
3bfb1d20 HS |
998 | { |
999 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1000 | struct dw_dma *dw = to_dw_dma(chan->device); | |
1001 | struct dw_desc *desc; | |
3bfb1d20 | 1002 | int i; |
69cea5a0 | 1003 | unsigned long flags; |
3bfb1d20 | 1004 | |
2e4c364e | 1005 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
3bfb1d20 | 1006 | |
3bfb1d20 HS |
1007 | /* ASSERT: channel is idle */ |
1008 | if (dma_readl(dw, CH_EN) & dwc->mask) { | |
41d5e59c | 1009 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); |
3bfb1d20 HS |
1010 | return -EIO; |
1011 | } | |
1012 | ||
d3ee98cd | 1013 | dma_cookie_init(chan); |
3bfb1d20 | 1014 | |
3bfb1d20 HS |
1015 | /* |
1016 | * NOTE: some controllers may have additional features that we | |
1017 | * need to initialize here, like "scatter-gather" (which | |
1018 | * doesn't mean what you think it means), and status writeback. | |
1019 | */ | |
1020 | ||
69cea5a0 | 1021 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1022 | i = dwc->descs_allocated; |
1023 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | |
69cea5a0 | 1024 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
1025 | |
1026 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); | |
1027 | if (!desc) { | |
41d5e59c | 1028 | dev_info(chan2dev(chan), |
3bfb1d20 | 1029 | "only allocated %d descriptors\n", i); |
69cea5a0 | 1030 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1031 | break; |
1032 | } | |
1033 | ||
e0bd0f8c | 1034 | INIT_LIST_HEAD(&desc->tx_list); |
3bfb1d20 HS |
1035 | dma_async_tx_descriptor_init(&desc->txd, chan); |
1036 | desc->txd.tx_submit = dwc_tx_submit; | |
1037 | desc->txd.flags = DMA_CTRL_ACK; | |
41d5e59c | 1038 | desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, |
3bfb1d20 HS |
1039 | sizeof(desc->lli), DMA_TO_DEVICE); |
1040 | dwc_desc_put(dwc, desc); | |
1041 | ||
69cea5a0 | 1042 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1043 | i = ++dwc->descs_allocated; |
1044 | } | |
1045 | ||
69cea5a0 | 1046 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 1047 | |
2e4c364e | 1048 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); |
3bfb1d20 HS |
1049 | |
1050 | return i; | |
1051 | } | |
1052 | ||
1053 | static void dwc_free_chan_resources(struct dma_chan *chan) | |
1054 | { | |
1055 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1056 | struct dw_dma *dw = to_dw_dma(chan->device); | |
1057 | struct dw_desc *desc, *_desc; | |
69cea5a0 | 1058 | unsigned long flags; |
3bfb1d20 HS |
1059 | LIST_HEAD(list); |
1060 | ||
2e4c364e | 1061 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, |
3bfb1d20 HS |
1062 | dwc->descs_allocated); |
1063 | ||
1064 | /* ASSERT: channel is idle */ | |
1065 | BUG_ON(!list_empty(&dwc->active_list)); | |
1066 | BUG_ON(!list_empty(&dwc->queue)); | |
1067 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); | |
1068 | ||
69cea5a0 | 1069 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1070 | list_splice_init(&dwc->free_list, &list); |
1071 | dwc->descs_allocated = 0; | |
61e183f8 | 1072 | dwc->initialized = false; |
3bfb1d20 HS |
1073 | |
1074 | /* Disable interrupts */ | |
1075 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | |
3bfb1d20 HS |
1076 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); |
1077 | ||
69cea5a0 | 1078 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
1079 | |
1080 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | |
41d5e59c DW |
1081 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
1082 | dma_unmap_single(chan2parent(chan), desc->txd.phys, | |
3bfb1d20 HS |
1083 | sizeof(desc->lli), DMA_TO_DEVICE); |
1084 | kfree(desc); | |
1085 | } | |
1086 | ||
2e4c364e | 1087 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
3bfb1d20 HS |
1088 | } |
1089 | ||
d9de4519 HCE |
1090 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
1091 | ||
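/*
 * Typical usage of this API (a sketch; error handling is elided and
 * the channel is assumed to be configured for slave transfers):
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */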
1092 | /** | |
1093 | * dw_dma_cyclic_start - start the cyclic DMA transfer | |
1094 | * @chan: the DMA channel to start | |
1095 | * | |
1096 | * Must be called with soft interrupts disabled. Returns zero on success or | |
1097 | * -errno on failure. | |
1098 | */ | |
1099 | int dw_dma_cyclic_start(struct dma_chan *chan) | |
1100 | { | |
1101 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1102 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
69cea5a0 | 1103 | unsigned long flags; |
d9de4519 HCE |
1104 | |
1105 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { | |
1106 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); | |
1107 | return -ENODEV; | |
1108 | } | |
1109 | ||
69cea5a0 | 1110 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 HCE |
1111 | |
1112 | /* assert channel is idle */ | |
1113 | if (dma_readl(dw, CH_EN) & dwc->mask) { | |
1114 | dev_err(chan2dev(&dwc->chan), | |
1115 | "BUG: Attempted to start non-idle channel\n"); | |
1d455437 | 1116 | dwc_dump_chan_regs(dwc); |
69cea5a0 | 1117 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1118 | return -EBUSY; |
1119 | } | |
1120 | ||
d9de4519 HCE |
1121 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1122 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
1123 | ||
1124 | /* setup DMAC channel registers */ | |
1125 | channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); | |
1126 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | |
1127 | channel_writel(dwc, CTL_HI, 0); | |
1128 | ||
1129 | channel_set_bit(dw, CH_EN, dwc->mask); | |
1130 | ||
69cea5a0 | 1131 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1132 | |
1133 | return 0; | |
1134 | } | |
1135 | EXPORT_SYMBOL(dw_dma_cyclic_start); | |
1136 | ||
1137 | /** | |
1138 | * dw_dma_cyclic_stop - stop the cyclic DMA transfer | |
1139 | * @chan: the DMA channel to stop | |
1140 | * | |
1141 | * Must be called with soft interrupts disabled. | |
1142 | */ | |
1143 | void dw_dma_cyclic_stop(struct dma_chan *chan) | |
1144 | { | |
1145 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1146 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
69cea5a0 | 1147 | unsigned long flags; |
d9de4519 | 1148 | |
69cea5a0 | 1149 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1150 | |
3f936207 | 1151 | dwc_chan_disable(dw, dwc); |
d9de4519 | 1152 | |
69cea5a0 | 1153 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1154 | } |
1155 | EXPORT_SYMBOL(dw_dma_cyclic_stop); | |
1156 | ||
1157 | /** | |
1158 | * dw_dma_cyclic_prep - prepare the cyclic DMA transfer | |
1159 | * @chan: the DMA channel to prepare | |
1160 | * @buf_addr: physical DMA address where the buffer starts | |
1161 | * @buf_len: total number of bytes for the entire buffer | |
1162 | * @period_len: number of bytes for each period | |
1163 | * @direction: transfer direction, to or from device | |
1164 | * | |
1165 | * Must be called before trying to start the transfer. Returns a valid struct | |
1166 | * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. | |
1167 | */ | |
1168 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |
1169 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | |
db8196df | 1170 | enum dma_transfer_direction direction) |
d9de4519 HCE |
1171 | { |
1172 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
327e6970 | 1173 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
d9de4519 HCE |
1174 | struct dw_cyclic_desc *cdesc; |
1175 | struct dw_cyclic_desc *retval = NULL; | |
1176 | struct dw_desc *desc; | |
1177 | struct dw_desc *last = NULL; | |
d9de4519 HCE |
1178 | unsigned long was_cyclic; |
1179 | unsigned int reg_width; | |
1180 | unsigned int periods; | |
1181 | unsigned int i; | |
69cea5a0 | 1182 | unsigned long flags; |
d9de4519 | 1183 | |
69cea5a0 | 1184 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1185 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
69cea5a0 | 1186 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1187 | dev_dbg(chan2dev(&dwc->chan), |
1188 | "queue and/or active list are not empty\n"); | |
1189 | return ERR_PTR(-EBUSY); | |
1190 | } | |
1191 | ||
1192 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
69cea5a0 | 1193 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1194 | if (was_cyclic) { |
1195 | dev_dbg(chan2dev(&dwc->chan), | |
1196 | "channel already prepared for cyclic DMA\n"); | |
1197 | return ERR_PTR(-EBUSY); | |
1198 | } | |
1199 | ||
1200 | retval = ERR_PTR(-EINVAL); | |
327e6970 VK |
1201 | |
1202 | if (direction == DMA_MEM_TO_DEV) | |
1203 | reg_width = __ffs(sconfig->dst_addr_width); | |
1204 | else | |
1205 | reg_width = __ffs(sconfig->src_addr_width); | |
1206 | ||
d9de4519 HCE |
1207 | periods = buf_len / period_len; |
1208 | ||
1209 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ | |
4a63a8b3 | 1210 | if (period_len > (dwc->block_size << reg_width)) |
d9de4519 HCE |
1211 | goto out_err; |
1212 | if (unlikely(period_len & ((1 << reg_width) - 1))) | |
1213 | goto out_err; | |
1214 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | |
1215 | goto out_err; | |
db8196df | 1216 | if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM)))) |
d9de4519 HCE |
1217 | goto out_err; |
1218 | ||
1219 | retval = ERR_PTR(-ENOMEM); | |
1220 | ||
1221 | if (periods > NR_DESCS_PER_CHANNEL) | |
1222 | goto out_err; | |
1223 | ||
1224 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); | |
1225 | if (!cdesc) | |
1226 | goto out_err; | |
1227 | ||
1228 | cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); | |
1229 | if (!cdesc->desc) | |
1230 | goto out_err_alloc; | |
1231 | ||
1232 | for (i = 0; i < periods; i++) { | |
1233 | desc = dwc_desc_get(dwc); | |
1234 | if (!desc) | |
1235 | goto out_err_desc_get; | |
1236 | ||
1237 | switch (direction) { | |
db8196df | 1238 | case DMA_MEM_TO_DEV: |
327e6970 | 1239 | desc->lli.dar = sconfig->dst_addr; |
d9de4519 | 1240 | desc->lli.sar = buf_addr + (period_len * i); |
327e6970 | 1241 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) |
d9de4519 HCE |
1242 | | DWC_CTLL_DST_WIDTH(reg_width) |
1243 | | DWC_CTLL_SRC_WIDTH(reg_width) | |
1244 | | DWC_CTLL_DST_FIX | |
1245 | | DWC_CTLL_SRC_INC | |
d9de4519 | 1246 | | DWC_CTLL_INT_EN); |
327e6970 VK |
1247 | |
1248 | desc->lli.ctllo |= sconfig->device_fc ? | |
1249 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | |
1250 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | |
1251 | ||
d9de4519 | 1252 | break; |
db8196df | 1253 | case DMA_DEV_TO_MEM: |
d9de4519 | 1254 | desc->lli.dar = buf_addr + (period_len * i); |
327e6970 VK |
1255 | desc->lli.sar = sconfig->src_addr; |
1256 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) | |
d9de4519 HCE |
1257 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1258 | | DWC_CTLL_DST_WIDTH(reg_width) | |
1259 | | DWC_CTLL_DST_INC | |
1260 | | DWC_CTLL_SRC_FIX | |
d9de4519 | 1261 | | DWC_CTLL_INT_EN); |
327e6970 VK |
1262 | |
1263 | desc->lli.ctllo |= sconfig->device_fc ? | |
1264 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | |
1265 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | |
1266 | ||
d9de4519 HCE |
1267 | break; |
1268 | default: | |
1269 | break; | |
1270 | } | |
1271 | ||
1272 | desc->lli.ctlhi = (period_len >> reg_width); | |
1273 | cdesc->desc[i] = desc; | |
1274 | ||
1275 | if (last) { | |
1276 | last->lli.llp = desc->txd.phys; | |
1277 | dma_sync_single_for_device(chan2parent(chan), | |
1278 | last->txd.phys, sizeof(last->lli), | |
1279 | DMA_TO_DEVICE); | |
1280 | } | |
1281 | ||
1282 | last = desc; | |
1283 | } | |
1284 | ||
1285 | /* let's make a cyclic list */ |
1286 | last->lli.llp = cdesc->desc[0]->txd.phys; | |
1287 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, | |
1288 | sizeof(last->lli), DMA_TO_DEVICE); | |
1289 | ||
2f45d613 AS |
1290 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " |
1291 | "period %zu periods %d\n", (unsigned long long)buf_addr, | |
1292 | buf_len, period_len, periods); | |
d9de4519 HCE |
1293 | |
1294 | cdesc->periods = periods; | |
1295 | dwc->cdesc = cdesc; | |
1296 | ||
1297 | return cdesc; | |
1298 | ||
1299 | out_err_desc_get: | |
1300 | while (i--) | |
1301 | dwc_desc_put(dwc, cdesc->desc[i]); | |
1302 | out_err_alloc: | |
1303 | kfree(cdesc); | |
1304 | out_err: | |
1305 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
1306 | return (struct dw_cyclic_desc *)retval; | |
1307 | } | |
1308 | EXPORT_SYMBOL(dw_dma_cyclic_prep); | |
1309 | ||
1310 | /** | |
1311 | * dw_dma_cyclic_free - free a prepared cyclic DMA transfer | |
1312 | * @chan: the DMA channel to free | |
1313 | */ | |
1314 | void dw_dma_cyclic_free(struct dma_chan *chan) | |
1315 | { | |
1316 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1317 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
1318 | struct dw_cyclic_desc *cdesc = dwc->cdesc; | |
1319 | int i; | |
69cea5a0 | 1320 | unsigned long flags; |
d9de4519 | 1321 | |
2e4c364e | 1322 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); |
d9de4519 HCE |
1323 | |
1324 | if (!cdesc) | |
1325 | return; | |
1326 | ||
69cea5a0 | 1327 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1328 | |
3f936207 | 1329 | dwc_chan_disable(dw, dwc); |
d9de4519 | 1330 | |
d9de4519 HCE |
1331 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1332 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
1333 | ||
69cea5a0 | 1334 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1335 | |
1336 | for (i = 0; i < cdesc->periods; i++) | |
1337 | dwc_desc_put(dwc, cdesc->desc[i]); | |
1338 | ||
1339 | kfree(cdesc->desc); | |
1340 | kfree(cdesc); | |
1341 | ||
1342 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
1343 | } | |
1344 | EXPORT_SYMBOL(dw_dma_cyclic_free); | |
1345 | ||
3bfb1d20 HS |
1346 | /*----------------------------------------------------------------------*/ |
1347 | ||
1348 | static void dw_dma_off(struct dw_dma *dw) | |
1349 | { | |
61e183f8 VK |
1350 | int i; |
1351 | ||
3bfb1d20 HS |
1352 | dma_writel(dw, CFG, 0); |
1353 | ||
1354 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | |
3bfb1d20 HS |
1355 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1356 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | |
1357 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | |
1358 | ||
1359 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | |
1360 | cpu_relax(); | |
61e183f8 VK |
1361 | |
1362 | for (i = 0; i < dw->dma.chancnt; i++) | |
1363 | dw->chan[i].initialized = false; | |
3bfb1d20 HS |
1364 | } |
1365 | ||
0272e93f | 1366 | static int __devinit dw_probe(struct platform_device *pdev) |
3bfb1d20 HS |
1367 | { |
1368 | struct dw_dma_platform_data *pdata; | |
1369 | struct resource *io; | |
1370 | struct dw_dma *dw; | |
1371 | size_t size; | |
482c67ea AS |
1372 | void __iomem *regs; |
1373 | bool autocfg; | |
1374 | unsigned int dw_params; | |
1375 | unsigned int nr_channels; | |
4a63a8b3 | 1376 | unsigned int max_blk_size = 0; |
3bfb1d20 HS |
1377 | int irq; |
1378 | int err; | |
1379 | int i; | |
1380 | ||
6c618c9d | 1381 | pdata = dev_get_platdata(&pdev->dev); |
3bfb1d20 HS |
1382 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) |
1383 | return -EINVAL; | |
1384 | ||
1385 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1386 | if (!io) | |
1387 | return -EINVAL; | |
1388 | ||
1389 | irq = platform_get_irq(pdev, 0); | |
1390 | if (irq < 0) | |
1391 | return irq; | |
1392 | ||
482c67ea AS |
1393 | regs = devm_request_and_ioremap(&pdev->dev, io); |
1394 | if (!regs) | |
1395 | return -EBUSY; | |
1396 | ||
1397 | dw_params = dma_read_byaddr(regs, DW_PARAMS); | |
1398 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; | |
1399 | ||
1400 | if (autocfg) | |
1401 | nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; | |
1402 | else | |
1403 | nr_channels = pdata->nr_channels; | |
1404 | ||
1405 | size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); | |
dbde5c29 | 1406 | dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); |
3bfb1d20 HS |
1407 | if (!dw) |
1408 | return -ENOMEM; | |
1409 | ||
dbde5c29 AS |
1410 | dw->clk = devm_clk_get(&pdev->dev, "hclk"); |
1411 | if (IS_ERR(dw->clk)) | |
1412 | return PTR_ERR(dw->clk); | |
3075528d | 1413 | clk_prepare_enable(dw->clk); |
3bfb1d20 | 1414 | |
482c67ea AS |
1415 | dw->regs = regs; |
1416 | ||
4a63a8b3 AS |
1417 | /* get hardware configuration parameters */ |
1418 | if (autocfg) | |
1419 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); | |
1420 | ||
11f932ec | 1421 | /* Calculate all channel mask before DMA setup */ |
482c67ea | 1422 | dw->all_chan_mask = (1 << nr_channels) - 1; |
11f932ec | 1423 | |
3bfb1d20 HS |
1424 | /* force dma off, just in case */ |
1425 | dw_dma_off(dw); | |
1426 | ||
236b106f AS |
1427 | /* disable BLOCK interrupts as well */ |
1428 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | |
1429 | ||
dbde5c29 AS |
1430 | err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, |
1431 | "dw_dmac", dw); | |
3bfb1d20 | 1432 | if (err) |
dbde5c29 | 1433 | return err; |
3bfb1d20 HS |
1434 | |
1435 | platform_set_drvdata(pdev, dw); | |
1436 | ||
1437 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | |
1438 | ||
3bfb1d20 | 1439 | INIT_LIST_HEAD(&dw->dma.channels); |
482c67ea | 1440 | for (i = 0; i < nr_channels; i++) { |
3bfb1d20 HS |
1441 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1442 | ||
1443 | dwc->chan.device = &dw->dma; | |
d3ee98cd | 1444 | dma_cookie_init(&dwc->chan); |
b0c3130d VK |
1445 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1446 | list_add_tail(&dwc->chan.device_node, | |
1447 | &dw->dma.channels); | |
1448 | else | |
1449 | list_add(&dwc->chan.device_node, &dw->dma.channels); | |
3bfb1d20 | 1450 | |
93317e8e VK |
1451 | /* 7 is highest priority & 0 is lowest. */ |
1452 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | |
482c67ea | 1453 | dwc->priority = nr_channels - i - 1; |
93317e8e VK |
1454 | else |
1455 | dwc->priority = i; | |
1456 | ||
3bfb1d20 HS |
1457 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1458 | spin_lock_init(&dwc->lock); | |
1459 | dwc->mask = 1 << i; | |
1460 | ||
1461 | INIT_LIST_HEAD(&dwc->active_list); | |
1462 | INIT_LIST_HEAD(&dwc->queue); | |
1463 | INIT_LIST_HEAD(&dwc->free_list); | |
1464 | ||
1465 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
4a63a8b3 AS |
1466 | |
1467 | /* hardware configuration */ | |
1468 | if (autocfg) | |
1469 | /* Decode maximum block size for given channel. The | |
1470 | * stored 4 bit value represents blocks from 0x00 for 3 | |
1471 | * up to 0x0a for 4095. */ | |
1472 | dwc->block_size = | |
1473 | (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; | |
1474 | else | |
1475 | dwc->block_size = pdata->block_size; | |
3bfb1d20 HS |
1476 | } |
1477 | ||
11f932ec | 1478 | /* Clear all interrupts on all channels. */ |
3bfb1d20 | 1479 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
236b106f | 1480 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); |
3bfb1d20 HS |
1481 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1482 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | |
1483 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | |
1484 | ||
3bfb1d20 HS |
1485 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
1486 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | |
95ea759e JI |
1487 | if (pdata->is_private) |
1488 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | |
3bfb1d20 HS |
1489 | dw->dma.dev = &pdev->dev; |
1490 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | |
1491 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | |
1492 | ||
1493 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | |
1494 | ||
1495 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; | |
c3635c78 | 1496 | dw->dma.device_control = dwc_control; |
3bfb1d20 | 1497 | |
07934481 | 1498 | dw->dma.device_tx_status = dwc_tx_status; |
3bfb1d20 HS |
1499 | dw->dma.device_issue_pending = dwc_issue_pending; |
1500 | ||
1501 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | |
1502 | ||
1503 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", | |
482c67ea | 1504 | dev_name(&pdev->dev), nr_channels); |
3bfb1d20 HS |
1505 | |
1506 | dma_async_device_register(&dw->dma); | |
1507 | ||
1508 | return 0; | |
3bfb1d20 HS |
1509 | } |
1510 | ||
0272e93f | 1511 | static int __devexit dw_remove(struct platform_device *pdev) |
3bfb1d20 HS |
1512 | { |
1513 | struct dw_dma *dw = platform_get_drvdata(pdev); | |
1514 | struct dw_dma_chan *dwc, *_dwc; | |
3bfb1d20 HS |
1515 | |
1516 | dw_dma_off(dw); | |
1517 | dma_async_device_unregister(&dw->dma); | |
1518 | ||
3bfb1d20 HS |
1519 | tasklet_kill(&dw->tasklet); |
1520 | ||
1521 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | |
1522 | chan.device_node) { | |
1523 | list_del(&dwc->chan.device_node); | |
1524 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
1525 | } | |
1526 | ||
3bfb1d20 HS |
1527 | return 0; |
1528 | } | |
1529 | ||
1530 | static void dw_shutdown(struct platform_device *pdev) | |
1531 | { | |
1532 | struct dw_dma *dw = platform_get_drvdata(pdev); | |
1533 | ||
1534 | dw_dma_off(platform_get_drvdata(pdev)); | |
3075528d | 1535 | clk_disable_unprepare(dw->clk); |
3bfb1d20 HS |
1536 | } |
1537 | ||
4a256b5f | 1538 | static int dw_suspend_noirq(struct device *dev) |
3bfb1d20 | 1539 | { |
4a256b5f | 1540 | struct platform_device *pdev = to_platform_device(dev); |
3bfb1d20 HS |
1541 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1542 | ||
1543 | dw_dma_off(platform_get_drvdata(pdev)); | |
3075528d | 1544 | clk_disable_unprepare(dw->clk); |
61e183f8 | 1545 | |
3bfb1d20 HS |
1546 | return 0; |
1547 | } | |
1548 | ||
4a256b5f | 1549 | static int dw_resume_noirq(struct device *dev) |
3bfb1d20 | 1550 | { |
4a256b5f | 1551 | struct platform_device *pdev = to_platform_device(dev); |
3bfb1d20 HS |
1552 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1553 | ||
3075528d | 1554 | clk_prepare_enable(dw->clk); |
3bfb1d20 HS |
1555 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1556 | return 0; | |
3bfb1d20 HS |
1557 | } |
1558 | ||
47145210 | 1559 | static const struct dev_pm_ops dw_dev_pm_ops = { |
4a256b5f MD |
1560 | .suspend_noirq = dw_suspend_noirq, |
1561 | .resume_noirq = dw_resume_noirq, | |
7414a1b8 RK |
1562 | .freeze_noirq = dw_suspend_noirq, |
1563 | .thaw_noirq = dw_resume_noirq, | |
1564 | .restore_noirq = dw_resume_noirq, | |
1565 | .poweroff_noirq = dw_suspend_noirq, | |
4a256b5f MD |
1566 | }; |
1567 | ||
d3f797d9 VK |
1568 | #ifdef CONFIG_OF |
1569 | static const struct of_device_id dw_dma_id_table[] = { | |
1570 | { .compatible = "snps,dma-spear1340" }, | |
1571 | {} | |
1572 | }; | |
1573 | MODULE_DEVICE_TABLE(of, dw_dma_id_table); | |
1574 | #endif | |
1575 | ||
3bfb1d20 | 1576 | static struct platform_driver dw_driver = { |
0272e93f | 1577 | .remove = __devexit_p(dw_remove), |
3bfb1d20 | 1578 | .shutdown = dw_shutdown, |
3bfb1d20 HS |
1579 | .driver = { |
1580 | .name = "dw_dmac", | |
4a256b5f | 1581 | .pm = &dw_dev_pm_ops, |
d3f797d9 | 1582 | .of_match_table = of_match_ptr(dw_dma_id_table), |
3bfb1d20 HS |
1583 | }, |
1584 | }; | |
1585 | ||
1586 | static int __init dw_init(void) | |
1587 | { | |
1588 | return platform_driver_probe(&dw_driver, dw_probe); | |
1589 | } | |
cb689a70 | 1590 | subsys_initcall(dw_init); |
3bfb1d20 HS |
1591 | |
1592 | static void __exit dw_exit(void) | |
1593 | { | |
1594 | platform_driver_unregister(&dw_driver); | |
1595 | } | |
1596 | module_exit(dw_exit); | |
1597 | ||
1598 | MODULE_LICENSE("GPL v2"); | |
1599 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | |
e05503ef | 1600 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
10d8935f | 1601 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |