/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more).  See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;				\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;				\
		u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?		\
			_dwc->dws.p_master : _dwc->dws.m_master;	\
		u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?		\
			_dwc->dws.p_master : _dwc->dws.m_master;	\
									\
		(DWC_CTLL_DST_MSIZE(_dmsize)				\
		 | DWC_CTLL_SRC_MSIZE(_smsize)				\
		 | DWC_CTLL_LLP_D_EN					\
		 | DWC_CTLL_LLP_S_EN					\
		 | DWC_CTLL_DMS(_dms)					\
		 | DWC_CTLL_SMS(_sms));					\
	})

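/*
 * Illustrative note (editorial sketch, not used by the driver): the prep
 * routines below combine this macro with per-transfer fields, e.g. for a
 * memory-to-device transfer roughly as
 *
 *	ctllo = DWC_DEFAULT_CTLLO(chan)		// bursts, LLP enables, masters
 *		| DWC_CTLL_DST_WIDTH(reg_width)	// device register width
 *		| DWC_CTLL_DST_FIX		// device address does not move
 *		| DWC_CTLL_SRC_INC		// memory address increments
 *		| DWC_CTLL_FC(DW_DMA_FC_D_M2P);	// DMAC as flow controller
 *
 * i.e. the macro contributes everything that depends only on the channel
 * configuration, while each prep routine adds widths, increment modes and
 * flow control.
 */
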
/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}

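/*
 * Minimal client-side sketch (assumes a channel already obtained from the
 * dmaengine core; my_prep_descriptor() is hypothetical): the submit above
 * only queues the descriptor, so a consumer must also kick the engine:
 *
 *	struct dma_async_tx_descriptor *txd = my_prep_descriptor(chan);
 *	dma_cookie_t cookie = dmaengine_submit(txd);	// ends up in dwc_tx_submit()
 *	dma_async_issue_pending(chan);			// starts the transfer
 *
 * The hardware is only programmed later, when dwc_issue_pending() or
 * completion handling moves the descriptor from dwc->queue to the
 * active list.
 */
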
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;
	return desc;
}

static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
	bool hs_polarity = dwc->dws.hs_polarity;

	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
		return;

	cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
	cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);

	/* Set polarity of handshake interface */
	cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}

/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}

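/*
 * Note on "soft LLP" mode: controllers synthesized without hardware
 * linked-list support (dwc->nollp) can only transfer one block per CH_EN
 * cycle.  dwc_dostart() submits the first block with DWC_CTLL_INT_EN
 * forced on, and dwc_scan_descriptors() resubmits the next block from the
 * XFER interrupt until tx_node_active wraps back to the list head.
 */
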
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u8		lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;
	struct dmaengine_desc_callback	cb;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required)
		dmaengine_desc_get_callback(txd, &cb);
	else
		memset(&cb, 0, sizeof(cb));

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}

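/*
 * Worked example (editorial sketch): CTL_LO bits [6:4] hold SRC_TR_WIDTH
 * as an encoded power of two, so with BLOCK_TS = 16 items and a 32-bit
 * source width (encoding 2) the amount already transferred is
 * 16 * (1 << 2) = 64 bytes.
 */
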
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		unsigned int i;

		dev_err(chan2dev(&dwc->chan),
			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
			status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* Make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, dwc->cdesc->desc[i]);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	u8			m_master = dwc->dws.m_master;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	u32			ctllo;
	u8			lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, xfer_count);
		desc->len = xfer_count << src_width;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

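/*
 * Example of the width calculation above (editorial sketch): with a
 * 4-byte master data width and src, dest and len all 4-byte aligned,
 * __ffs(4 | src | dest | len) yields 2, i.e. 32-bit transfers; a single
 * odd byte in any of them drops the whole transfer to 8-bit accesses
 * (__ffs(...) == 0).
 */
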
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	u8			m_master = dwc->dws.m_master;
	u8			lms = DWC_LLP_LMS(m_master);
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			lli_write(desc, ctlhi, dlen >> mem_width);
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			lli_write(desc, ctlhi, dlen >> reg_width);
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}

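/*
 * Editorial note: each scatterlist entry above may expand into several
 * hardware LLIs, since one block moves at most dwc->block_size items.
 * For example (assuming a block limit of 4095 items), a 16 KiB segment
 * transferred with 32-bit accesses is 4096 items and therefore splits
 * into descriptors of 4095 and 1 items via the fill_desc loops.
 */
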
bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);

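/*
 * Typical use of the filter (sketch; the request-line numbers are
 * platform specific and purely illustrative):
 *
 *	struct dw_dma_slave slave = {
 *		.dma_dev = dma_device,	// must match chan->device->dev
 *		.src_id = 0,
 *		.dst_id = 1,
 *		.m_master = 0,
 *		.p_master = 1,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &slave);
 *
 * The memcpy() above is what makes passing a stack-allocated slave
 * structure like this safe.
 */
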
/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This is done by finding the most significant bit set: fls(n) - 2.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}

static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}

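/*
 * A hedged example of the slave configuration consumed above (all values
 * illustrative): a client driving a 32-bit device FIFO might use
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,	// hypothetical address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,		// becomes 2 after convert_burst()
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);	// lands in dwc_config()
 */
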
static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;
	unsigned int count = 20;	/* timeout iterations */
	u32 cfglo;

	spin_lock_irqsave(&dwc->lock, flags);

	cfglo = channel_readl(dwc, CFG_LO);
	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}

static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;

	return NULL;
}

static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
{
	struct dw_desc *desc;
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	desc = dwc_find_desc(dwc, cookie);
	if (desc) {
		if (desc == dwc_first_active(dwc)) {
			residue = desc->residue;
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
				residue -= dwc_get_sent(dwc);
		} else {
			residue = desc->total_len;
		}
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}

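/*
 * Residue bookkeeping, summarized: desc->residue starts at total_len and
 * dwc_scan_descriptors() subtracts each completed child's len, so the
 * value is only exact for the head of the active list (and, in soft LLP
 * mode, after also subtracting the bytes already sent in the current
 * block).  Any other active descriptor simply reports its full total_len.
 */
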
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	unsigned int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}

static void dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/*
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

	/* Enable controller here if needed */
	if (!dw->in_use)
		dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	return 0;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));

	clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was a last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* Enable interrupts to perform cyclic transfer */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);

	dwc_dostart(dwc, dwc->cdesc->desc[0]);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	u8				lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (unlikely(!is_slave_direction(direction)))
		goto out_err;

	dwc->direction = direction;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			lli_write(desc, dar, sconfig->dst_addr);
			lli_write(desc, sar, buf_addr + period_len * i);
			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_INT_EN));

			lli_set(desc, ctllo, sconfig->device_fc ?
					DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
					DWC_CTLL_FC(DW_DMA_FC_D_M2P));

			break;
		case DMA_DEV_TO_MEM:
			lli_write(desc, dar, buf_addr + period_len * i);
			lli_write(desc, sar, sconfig->src_addr);
			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_INT_EN));

			lli_set(desc, ctllo, sconfig->device_fc ?
					DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
					DWC_CTLL_FC(DW_DMA_FC_D_P2M));

			break;
		default:
			break;
		}

		lli_write(desc, ctlhi, period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last)
			lli_write(last, llp, desc->txd.phys | lms);

		last = desc;
	}

	/* Let's make a cyclic list */
	lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);

	dev_dbg(chan2dev(&dwc->chan),
		"cyclic prepared buf %pad len %zu period %zu periods %d\n",
		&buf_addr, buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

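/*
 * Cyclic API usage sketch (buffer/period sizes and the my_* names are
 * hypothetical):
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, 4 * PAGE_SIZE,
 *				   PAGE_SIZE, DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_ctx;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 *
 * period_callback runs from dwc_handle_cyclic() after each BLOCK interrupt.
 */
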
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	unsigned int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	dwc->cdesc = NULL;

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

3bfb1d20 HS |
1433 | /*----------------------------------------------------------------------*/ |
1434 | ||
3a14c66d | 1435 | int dw_dma_probe(struct dw_dma_chip *chip) |
a9ddb575 | 1436 | { |
3a14c66d | 1437 | struct dw_dma_platform_data *pdata; |
3bfb1d20 | 1438 | struct dw_dma *dw; |
30cb2639 | 1439 | bool autocfg = false; |
482c67ea | 1440 | unsigned int dw_params; |
7794e5b9 | 1441 | unsigned int i; |
3bfb1d20 | 1442 | int err; |
3bfb1d20 | 1443 | |
000871ce AS |
1444 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); |
1445 | if (!dw) | |
1446 | return -ENOMEM; | |
1447 | ||
161c3d04 AS |
1448 | dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); |
1449 | if (!dw->pdata) | |
1450 | return -ENOMEM; | |
1451 | ||
000871ce AS |
1452 | dw->regs = chip->regs; |
1453 | chip->dw = dw; | |
1454 | ||
bb32baf7 AS |
1455 | pm_runtime_get_sync(chip->dev); |
1456 | ||
3a14c66d | 1457 | if (!chip->pdata) { |
897e40d3 | 1458 | dw_params = dma_readl(dw, DW_PARAMS); |
30cb2639 | 1459 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); |
482c67ea | 1460 | |
30cb2639 AS |
1461 | autocfg = dw_params >> DW_PARAMS_EN & 1; |
1462 | if (!autocfg) { | |
1463 | err = -EINVAL; | |
1464 | goto err_pdata; | |
1465 | } | |
123de543 | 1466 | |
161c3d04 AS |
1467 | /* Reassign the platform data pointer */ |
1468 | pdata = dw->pdata; | |
123de543 | 1469 | |
30cb2639 AS |
1470 | /* Get hardware configuration parameters */ |
1471 | pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; | |
1472 | pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; | |
1473 | for (i = 0; i < pdata->nr_masters; i++) { | |
1474 | pdata->data_width[i] = | |
2e65060e | 1475 | 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3); |
30cb2639 | 1476 | } |
161c3d04 | 1477 | pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); |
30cb2639 | 1478 | |
123de543 AS |
1479 | /* Fill platform data with the default values */ |
1480 | pdata->is_private = true; | |
df5c7386 | 1481 | pdata->is_memcpy = true; |
123de543 AS |
1482 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; |
1483 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | |
3a14c66d | 1484 | } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { |
8be4f523 AS |
1485 | err = -EINVAL; |
1486 | goto err_pdata; | |
161c3d04 | 1487 | } else { |
3a14c66d | 1488 | memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata)); |
161c3d04 AS |
1489 | |
1490 | /* Reassign the platform data pointer */ | |
1491 | pdata = dw->pdata; | |
8be4f523 | 1492 | } |
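/*
 * Worked decode of the autoconfiguration path above, assuming a
 * hypothetical part whose DW_PARAMS fields read NR_CHAN = 7,
 * NR_MASTER = 1 and DATA_WIDTH(0) = 1: that yields
 * nr_channels = 7 + 1 = 8, nr_masters = 1 + 1 = 2 and
 * data_width[0] = 4 << 1 = 8 bytes, i.e. a 64-bit AHB master.
 */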
123de543 | 1493 | |
30cb2639 | 1494 | dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), |
000871ce | 1495 | GFP_KERNEL); |
8be4f523 AS |
1496 | if (!dw->chan) { |
1497 | err = -ENOMEM; | |
1498 | goto err_pdata; | |
1499 | } | |
3bfb1d20 | 1500 | |
11f932ec | 1501 | /* Calculate the mask covering all channels before DMA setup */ | |
30cb2639 | 1502 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
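/* e.g. nr_channels == 8 yields all_chan_mask == 0xff, one bit per channel */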
11f932ec | 1503 | |
75c61225 | 1504 | /* Force DMA off, just in case */ | |
3bfb1d20 HS |
1505 | dw_dma_off(dw); |
1506 | ||
75c61225 | 1507 | /* Create a pool of consistent memory blocks for hardware descriptors */ |
9cade1a4 | 1508 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, |
f8122a82 AS |
1509 | sizeof(struct dw_desc), 4, 0); |
1510 | if (!dw->desc_pool) { | |
9cade1a4 | 1511 | dev_err(chip->dev, "No memory for descriptors dma pool\n"); |
8be4f523 AS |
1512 | err = -ENOMEM; |
1513 | goto err_pdata; | |
f8122a82 AS |
1514 | } |
1515 | ||
3bfb1d20 HS |
1516 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
1517 | ||
97977f75 AS |
1518 | err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, |
1519 | "dw_dmac", dw); | |
1520 | if (err) | |
8be4f523 | 1521 | goto err_pdata; |
97977f75 | 1522 | |
3bfb1d20 | 1523 | INIT_LIST_HEAD(&dw->dma.channels); |
30cb2639 | 1524 | for (i = 0; i < pdata->nr_channels; i++) { |
3bfb1d20 HS |
1525 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1526 | ||
1527 | dwc->chan.device = &dw->dma; | |
d3ee98cd | 1528 | dma_cookie_init(&dwc->chan); |
b0c3130d VK |
1529 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1530 | list_add_tail(&dwc->chan.device_node, | |
1531 | &dw->dma.channels); | |
1532 | else | |
1533 | list_add(&dwc->chan.device_node, &dw->dma.channels); | |
3bfb1d20 | 1534 | |
93317e8e VK |
1535 | /* Priority 7 is the highest, 0 the lowest. */ | |
1536 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | |
30cb2639 | 1537 | dwc->priority = pdata->nr_channels - i - 1; |
93317e8e VK |
1538 | else |
1539 | dwc->priority = i; | |
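/*
 * Worked example: with 8 channels and CHAN_PRIORITY_ASCENDING,
 * channel 0 gets priority 7 (the highest) and channel 7 gets 0;
 * otherwise the priority simply equals the channel index.
 */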
1540 | ||
3bfb1d20 HS |
1541 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1542 | spin_lock_init(&dwc->lock); | |
1543 | dwc->mask = 1 << i; | |
1544 | ||
1545 | INIT_LIST_HEAD(&dwc->active_list); | |
1546 | INIT_LIST_HEAD(&dwc->queue); | |
3bfb1d20 HS |
1547 | |
1548 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
4a63a8b3 | 1549 | |
0fdb567f | 1550 | dwc->direction = DMA_TRANS_NONE; |
a0982004 | 1551 | |
75c61225 | 1552 | /* Hardware configuration */ |
fed2574b | 1553 | if (autocfg) { |
6bea0f6d | 1554 | unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; |
897e40d3 AS |
1555 | void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r]; |
1556 | unsigned int dwc_params = dma_readl_native(addr); | |
fed2574b | 1557 | |
9cade1a4 AS |
1558 | dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, |
1559 | dwc_params); | |
985a6c7d | 1560 | |
1d566f11 AS |
1561 | /* | |
1562 | * Decode the maximum block size for the given channel. The | |
4a63a8b3 | 1563 | * stored 4-bit value encodes block sizes from 3 (0x00) | |
1564 | * up to 4095 (0x0a). | |
1565 | */ | |
4a63a8b3 | 1566 | dwc->block_size = |
161c3d04 | 1567 | (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1; |
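/*
 * Worked example: an encoded nibble of 0x0a gives
 * (4 << 0x0a) - 1 = 4095 transfers, and 0x00 gives
 * (4 << 0) - 1 = 3, matching the range in the comment above.
 */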
fed2574b AS |
1568 | dwc->nollp = |
1569 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; | |
1570 | } else { | |
4a63a8b3 | 1571 | dwc->block_size = pdata->block_size; |
bd2c6636 | 1572 | dwc->nollp = !pdata->multi_block[i]; |
fed2574b | 1573 | } |
3bfb1d20 HS |
1574 | } |
1575 | ||
11f932ec | 1576 | /* Clear all interrupts on all channels. */ |
3bfb1d20 | 1577 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
236b106f | 1578 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); |
3bfb1d20 HS |
1579 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1580 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | |
1581 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | |
1582 | ||
df5c7386 | 1583 | /* Set capabilities */ |
3bfb1d20 | 1584 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
95ea759e JI |
1585 | if (pdata->is_private) |
1586 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | |
df5c7386 AS |
1587 | if (pdata->is_memcpy) |
1588 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | |
1589 | ||
9cade1a4 | 1590 | dw->dma.dev = chip->dev; |
3bfb1d20 HS |
1591 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
1592 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | |
1593 | ||
1594 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | |
3bfb1d20 | 1595 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; |
029a40e9 | 1596 | |
a4b0d348 MR |
1597 | dw->dma.device_config = dwc_config; |
1598 | dw->dma.device_pause = dwc_pause; | |
1599 | dw->dma.device_resume = dwc_resume; | |
1600 | dw->dma.device_terminate_all = dwc_terminate_all; | |
3bfb1d20 | 1601 | |
07934481 | 1602 | dw->dma.device_tx_status = dwc_tx_status; |
3bfb1d20 HS |
1603 | dw->dma.device_issue_pending = dwc_issue_pending; |
1604 | ||
029a40e9 AS |
1605 | /* DMA capabilities */ |
1606 | dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; | |
1607 | dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; | |
1608 | dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | | |
1609 | BIT(DMA_MEM_TO_MEM); | |
1610 | dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | |
1611 | ||
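/*
 * Consumers can discover the capabilities set above through the
 * generic dmaengine helper; a sketch, assuming "chan" is a channel
 * obtained from this controller:
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps))
 *		pr_info("src widths: 0x%x\n", caps.src_addr_widths);
 */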
1222934e AS |
1612 | err = dma_async_device_register(&dw->dma); |
1613 | if (err) | |
1614 | goto err_dma_register; | |
1615 | ||
9cade1a4 | 1616 | dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", |
30cb2639 | 1617 | pdata->nr_channels); |
3bfb1d20 | 1618 | |
bb32baf7 AS |
1619 | pm_runtime_put_sync_suspend(chip->dev); |
1620 | ||
3bfb1d20 | 1621 | return 0; |
8be4f523 | 1622 | |
1222934e AS |
1623 | err_dma_register: |
1624 | free_irq(chip->irq, dw); | |
8be4f523 | 1625 | err_pdata: |
bb32baf7 | 1626 | pm_runtime_put_sync_suspend(chip->dev); |
8be4f523 | 1627 | return err; |
3bfb1d20 | 1628 | } |
9cade1a4 | 1629 | EXPORT_SYMBOL_GPL(dw_dma_probe); |
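/*
 * A sketch of how a bus-glue driver is expected to feed this core,
 * assuming a memory-mapped platform device and <linux/platform_device.h>;
 * the function name and the reliance on pure hardware autoconfiguration
 * (chip->pdata == NULL) are illustrative, not part of this file.
 */
static int my_glue_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct resource *mem;
	int err;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	chip->pdata = NULL;	/* let dw_dma_probe() read DW_PARAMS */

	err = dw_dma_probe(chip);
	if (err)
		return err;

	platform_set_drvdata(pdev, chip);
	return 0;
}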
3bfb1d20 | 1630 | |
9cade1a4 | 1631 | int dw_dma_remove(struct dw_dma_chip *chip) |
3bfb1d20 | 1632 | { |
9cade1a4 | 1633 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1634 | struct dw_dma_chan *dwc, *_dwc; |
3bfb1d20 | 1635 | |
bb32baf7 AS |
1636 | pm_runtime_get_sync(chip->dev); |
1637 | ||
3bfb1d20 HS |
1638 | dw_dma_off(dw); |
1639 | dma_async_device_unregister(&dw->dma); | |
1640 | ||
97977f75 | 1641 | free_irq(chip->irq, dw); |
3bfb1d20 HS |
1642 | tasklet_kill(&dw->tasklet); |
1643 | ||
1644 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | |
1645 | chan.device_node) { | |
1646 | list_del(&dwc->chan.device_node); | |
1647 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
1648 | } | |
1649 | ||
bb32baf7 | 1650 | pm_runtime_put_sync_suspend(chip->dev); |
3bfb1d20 HS |
1651 | return 0; |
1652 | } | |
9cade1a4 | 1653 | EXPORT_SYMBOL_GPL(dw_dma_remove); |
3bfb1d20 | 1654 | |
2540f74b | 1655 | int dw_dma_disable(struct dw_dma_chip *chip) |
3bfb1d20 | 1656 | { |
9cade1a4 | 1657 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1658 | |
6168d567 | 1659 | dw_dma_off(dw); |
3bfb1d20 HS |
1660 | return 0; |
1661 | } | |
2540f74b | 1662 | EXPORT_SYMBOL_GPL(dw_dma_disable); |
3bfb1d20 | 1663 | |
2540f74b | 1664 | int dw_dma_enable(struct dw_dma_chip *chip) |
3bfb1d20 | 1665 | { |
9cade1a4 | 1666 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1667 | |
7a83c045 | 1668 | dw_dma_on(dw); |
3bfb1d20 | 1669 | return 0; |
3bfb1d20 | 1670 | } |
2540f74b | 1671 | EXPORT_SYMBOL_GPL(dw_dma_enable); |
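/*
 * Sketch of the intended use of the enable/disable pair, assuming the
 * hypothetical glue driver above wires them into its system-sleep hooks:
 */
static int my_glue_suspend(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	return dw_dma_disable(chip);
}

static int my_glue_resume(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	return dw_dma_enable(chip);
}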
3bfb1d20 HS |
1672 | |
1673 | MODULE_LICENSE("GPL v2"); | |
9cade1a4 | 1674 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); |
e05503ef | 1675 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
da89947b | 1676 | MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); |