/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more).  See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(_chan) ({				\
	struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
	struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
	bool _is_slave = is_slave_direction(_dwc->direction);	\
	u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
		DW_DMA_MSIZE_16;				\
	u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
		DW_DMA_MSIZE_16;				\
	u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?		\
		_dwc->p_master : _dwc->m_master;		\
	u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?		\
		_dwc->p_master : _dwc->m_master;		\
								\
	(DWC_CTLL_DST_MSIZE(_dmsize)				\
	 | DWC_CTLL_SRC_MSIZE(_smsize)				\
	 | DWC_CTLL_LLP_D_EN					\
	 | DWC_CTLL_LLP_S_EN					\
	 | DWC_CTLL_DMS(_dms)					\
	 | DWC_CTLL_SMS(_sms));					\
	})
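
/*
 * For slave transfers the burst sizes (MSIZE fields) come from the channel's
 * dma_slave_config; memory-to-memory transfers default to DW_DMA_MSIZE_16.
 * The master selects route the device end of a transfer through the
 * peripheral master (p_master) and the memory end through the memory master
 * (m_master).
 */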

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

static inline unsigned int dwc_fast_ffs(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}

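/*
 * Channels without hardware LLP support (dwc->nollp) emulate multi-block
 * transfers in software: dwc_dostart() sets DW_DMA_IS_SOFT_LLP and programs
 * one block at a time with DWC_CTLL_INT_EN forced on, so the XFER interrupt
 * raised after each block lets dwc_scan_descriptors() advance tx_node_active
 * and submit the next block.
 */
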
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u8 lms = DWC_LLP_LMS(dwc->m_master);
	unsigned long was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		dwc->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	dma_descriptor_unmap(txd);
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

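/*
 * CTL_HI.BLOCK_TS counts transfers of CTL_LO's SRC_TR_WIDTH (bits 6:4), so
 * the product below is the number of bytes read from the source so far
 * (per the DW_ahb_dmac databook).
 */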
4702d524 AS |
339 | /* Returns how many bytes were already received from source */ |
340 | static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) | |
341 | { | |
342 | u32 ctlhi = channel_readl(dwc, CTL_HI); | |
343 | u32 ctllo = channel_readl(dwc, CTL_LO); | |
344 | ||
345 | return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); | |
346 | } | |
347 | ||
3bfb1d20 HS |
348 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) |
349 | { | |
350 | dma_addr_t llp; | |
351 | struct dw_desc *desc, *_desc; | |
352 | struct dw_desc *child; | |
353 | u32 status_xfer; | |
69cea5a0 | 354 | unsigned long flags; |
3bfb1d20 | 355 | |
69cea5a0 | 356 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
357 | llp = channel_readl(dwc, LLP); |
358 | status_xfer = dma_readl(dw, RAW.XFER); | |
359 | ||
360 | if (status_xfer & dwc->mask) { | |
361 | /* Everything we've submitted is done */ | |
362 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
77bcc497 AS |
363 | |
364 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { | |
fdf475fa AS |
365 | struct list_head *head, *active = dwc->tx_node_active; |
366 | ||
367 | /* | |
368 | * We are inside first active descriptor. | |
369 | * Otherwise something is really wrong. | |
370 | */ | |
371 | desc = dwc_first_active(dwc); | |
372 | ||
373 | head = &desc->tx_list; | |
374 | if (active != head) { | |
4702d524 AS |
375 | /* Update desc to reflect last sent one */ |
376 | if (active != head->next) | |
377 | desc = to_dw_desc(active->prev); | |
378 | ||
379 | dwc->residue -= desc->len; | |
380 | ||
fdf475fa | 381 | child = to_dw_desc(active); |
77bcc497 AS |
382 | |
383 | /* Submit next block */ | |
fdf475fa | 384 | dwc_do_single_block(dwc, child); |
77bcc497 | 385 | |
fdf475fa | 386 | spin_unlock_irqrestore(&dwc->lock, flags); |
77bcc497 AS |
387 | return; |
388 | } | |
fdf475fa | 389 | |
77bcc497 AS |
390 | /* We are done here */ |
391 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | |
392 | } | |
4702d524 AS |
393 | |
394 | dwc->residue = 0; | |
395 | ||
69cea5a0 VK |
396 | spin_unlock_irqrestore(&dwc->lock, flags); |
397 | ||
3bfb1d20 HS |
398 | dwc_complete_all(dw, dwc); |
399 | return; | |
400 | } | |
401 | ||
69cea5a0 | 402 | if (list_empty(&dwc->active_list)) { |
4702d524 | 403 | dwc->residue = 0; |
69cea5a0 | 404 | spin_unlock_irqrestore(&dwc->lock, flags); |
087809fc | 405 | return; |
69cea5a0 | 406 | } |
087809fc | 407 | |
77bcc497 AS |
408 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { |
409 | dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); | |
69cea5a0 | 410 | spin_unlock_irqrestore(&dwc->lock, flags); |
087809fc | 411 | return; |
69cea5a0 | 412 | } |
087809fc | 413 | |
5a87f0e6 | 414 | dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp); |
3bfb1d20 HS |
415 | |
416 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | |
75c61225 | 417 | /* Initial residue value */ |
4702d524 AS |
418 | dwc->residue = desc->total_len; |
419 | ||
75c61225 | 420 | /* Check first descriptors addr */ |
2a0fae02 | 421 | if (desc->txd.phys == DWC_LLP_LOC(llp)) { |
69cea5a0 | 422 | spin_unlock_irqrestore(&dwc->lock, flags); |
84adccfb | 423 | return; |
69cea5a0 | 424 | } |
84adccfb | 425 | |
75c61225 | 426 | /* Check first descriptors llp */ |
df1f3a23 | 427 | if (lli_read(desc, llp) == llp) { |
3bfb1d20 | 428 | /* This one is currently in progress */ |
4702d524 | 429 | dwc->residue -= dwc_get_sent(dwc); |
69cea5a0 | 430 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 431 | return; |
69cea5a0 | 432 | } |
3bfb1d20 | 433 | |
4702d524 AS |
434 | dwc->residue -= desc->len; |
435 | list_for_each_entry(child, &desc->tx_list, desc_node) { | |
df1f3a23 | 436 | if (lli_read(child, llp) == llp) { |
3bfb1d20 | 437 | /* Currently in progress */ |
4702d524 | 438 | dwc->residue -= dwc_get_sent(dwc); |
69cea5a0 | 439 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 440 | return; |
69cea5a0 | 441 | } |
4702d524 AS |
442 | dwc->residue -= child->len; |
443 | } | |
3bfb1d20 HS |
444 | |
445 | /* | |
446 | * No descriptors so far seem to be in progress, i.e. | |
447 | * this one must be done. | |
448 | */ | |
69cea5a0 | 449 | spin_unlock_irqrestore(&dwc->lock, flags); |
5fedefb8 | 450 | dwc_descriptor_complete(dwc, desc, true); |
69cea5a0 | 451 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
452 | } |
453 | ||
41d5e59c | 454 | dev_err(chan2dev(&dwc->chan), |
3bfb1d20 HS |
455 | "BUG: All descriptors done, but channel not idle!\n"); |
456 | ||
457 | /* Try to continue after resetting the channel... */ | |
3f936207 | 458 | dwc_chan_disable(dw, dwc); |
3bfb1d20 | 459 | |
e7637c6c | 460 | dwc_dostart_first_queued(dwc); |
69cea5a0 | 461 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
462 | } |
463 | ||
df1f3a23 | 464 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc) |
3bfb1d20 | 465 | { |
21d43f49 | 466 | dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
df1f3a23 MR |
467 | lli_read(desc, sar), |
468 | lli_read(desc, dar), | |
469 | lli_read(desc, llp), | |
470 | lli_read(desc, ctlhi), | |
471 | lli_read(desc, ctllo)); | |
3bfb1d20 HS |
472 | } |
473 | ||
474 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
475 | { | |
476 | struct dw_desc *bad_desc; | |
477 | struct dw_desc *child; | |
69cea5a0 | 478 | unsigned long flags; |
3bfb1d20 HS |
479 | |
480 | dwc_scan_descriptors(dw, dwc); | |
481 | ||
69cea5a0 VK |
482 | spin_lock_irqsave(&dwc->lock, flags); |
483 | ||
3bfb1d20 HS |
484 | /* |
485 | * The descriptor currently at the head of the active list is | |
486 | * borked. Since we don't have any way to report errors, we'll | |
487 | * just have to scream loudly and try to carry on. | |
488 | */ | |
489 | bad_desc = dwc_first_active(dwc); | |
490 | list_del_init(&bad_desc->desc_node); | |
f336e42f | 491 | list_move(dwc->queue.next, dwc->active_list.prev); |
3bfb1d20 HS |
492 | |
493 | /* Clear the error flag and try to restart the controller */ | |
494 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | |
495 | if (!list_empty(&dwc->active_list)) | |
496 | dwc_dostart(dwc, dwc_first_active(dwc)); | |
497 | ||
498 | /* | |
ba84bd71 | 499 | * WARN may seem harsh, but since this only happens |
3bfb1d20 HS |
500 | * when someone submits a bad physical address in a |
501 | * descriptor, we should consider ourselves lucky that the | |
502 | * controller flagged an error instead of scribbling over | |
503 | * random memory locations. | |
504 | */ | |
ba84bd71 AS |
505 | dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" |
506 | " cookie: %d\n", bad_desc->txd.cookie); | |
df1f3a23 | 507 | dwc_dump_lli(dwc, bad_desc); |
e0bd0f8c | 508 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
df1f3a23 | 509 | dwc_dump_lli(dwc, child); |
3bfb1d20 | 510 | |
69cea5a0 VK |
511 | spin_unlock_irqrestore(&dwc->lock, flags); |
512 | ||
3bfb1d20 | 513 | /* Pretend the descriptor completed successfully */ |
5fedefb8 | 514 | dwc_descriptor_complete(dwc, bad_desc, true); |
3bfb1d20 HS |
515 | } |
516 | ||
d9de4519 HCE |
517 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
518 | ||
8004cbb4 | 519 | dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) |
d9de4519 HCE |
520 | { |
521 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
522 | return channel_readl(dwc, SAR); | |
523 | } | |
524 | EXPORT_SYMBOL(dw_dma_get_src_addr); | |
525 | ||
8004cbb4 | 526 | dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) |
d9de4519 HCE |
527 | { |
528 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
529 | return channel_readl(dwc, DAR); | |
530 | } | |
531 | EXPORT_SYMBOL(dw_dma_get_dst_addr); | |
532 | ||
75c61225 | 533 | /* Called with dwc->lock held and all DMAC interrupts disabled */ |
d9de4519 | 534 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, |
2895b2ca | 535 | u32 status_block, u32 status_err, u32 status_xfer) |
d9de4519 | 536 | { |
69cea5a0 VK |
537 | unsigned long flags; |
538 | ||
2895b2ca | 539 | if (status_block & dwc->mask) { |
d9de4519 HCE |
540 | void (*callback)(void *param); |
541 | void *callback_param; | |
542 | ||
543 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", | |
544 | channel_readl(dwc, LLP)); | |
2895b2ca | 545 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
d9de4519 HCE |
546 | |
547 | callback = dwc->cdesc->period_callback; | |
548 | callback_param = dwc->cdesc->period_callback_param; | |
69cea5a0 VK |
549 | |
550 | if (callback) | |
d9de4519 | 551 | callback(callback_param); |
d9de4519 HCE |
552 | } |
553 | ||
554 | /* | |
555 | * Error and transfer complete are highly unlikely, and will most | |
556 | * likely be due to a configuration error by the user. | |
557 | */ | |
558 | if (unlikely(status_err & dwc->mask) || | |
559 | unlikely(status_xfer & dwc->mask)) { | |
560 | int i; | |
561 | ||
fc61f6b4 AS |
562 | dev_err(chan2dev(&dwc->chan), |
563 | "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n", | |
564 | status_xfer ? "xfer" : "error"); | |
69cea5a0 VK |
565 | |
566 | spin_lock_irqsave(&dwc->lock, flags); | |
567 | ||
1d455437 | 568 | dwc_dump_chan_regs(dwc); |
d9de4519 | 569 | |
3f936207 | 570 | dwc_chan_disable(dw, dwc); |
d9de4519 | 571 | |
75c61225 | 572 | /* Make sure DMA does not restart by loading a new list */ |
d9de4519 HCE |
573 | channel_writel(dwc, LLP, 0); |
574 | channel_writel(dwc, CTL_LO, 0); | |
575 | channel_writel(dwc, CTL_HI, 0); | |
576 | ||
2895b2ca | 577 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
d9de4519 HCE |
578 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
579 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
580 | ||
581 | for (i = 0; i < dwc->cdesc->periods; i++) | |
df1f3a23 | 582 | dwc_dump_lli(dwc, dwc->cdesc->desc[i]); |
69cea5a0 VK |
583 | |
584 | spin_unlock_irqrestore(&dwc->lock, flags); | |
d9de4519 | 585 | } |
ee1cdcda AS |
586 | |
587 | /* Re-enable interrupts */ | |
588 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); | |
d9de4519 HCE |
589 | } |
590 | ||
591 | /* ------------------------------------------------------------------------- */ | |
592 | ||
3bfb1d20 HS |
593 | static void dw_dma_tasklet(unsigned long data) |
594 | { | |
595 | struct dw_dma *dw = (struct dw_dma *)data; | |
596 | struct dw_dma_chan *dwc; | |
2895b2ca | 597 | u32 status_block; |
3bfb1d20 HS |
598 | u32 status_xfer; |
599 | u32 status_err; | |
600 | int i; | |
601 | ||
2895b2ca | 602 | status_block = dma_readl(dw, RAW.BLOCK); |
7fe7b2f4 | 603 | status_xfer = dma_readl(dw, RAW.XFER); |
3bfb1d20 HS |
604 | status_err = dma_readl(dw, RAW.ERROR); |
605 | ||
2e4c364e | 606 | dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); |
3bfb1d20 HS |
607 | |
608 | for (i = 0; i < dw->dma.chancnt; i++) { | |
609 | dwc = &dw->chan[i]; | |
d9de4519 | 610 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
2895b2ca MR |
611 | dwc_handle_cyclic(dw, dwc, status_block, status_err, |
612 | status_xfer); | |
d9de4519 | 613 | else if (status_err & (1 << i)) |
3bfb1d20 | 614 | dwc_handle_error(dw, dwc); |
77bcc497 | 615 | else if (status_xfer & (1 << i)) |
3bfb1d20 | 616 | dwc_scan_descriptors(dw, dwc); |
3bfb1d20 HS |
617 | } |
618 | ||
ee1cdcda | 619 | /* Re-enable interrupts */ |
3bfb1d20 | 620 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); |
3bfb1d20 HS |
621 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); |
622 | } | |
623 | ||
624 | static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |
625 | { | |
626 | struct dw_dma *dw = dev_id; | |
02a21b79 | 627 | u32 status; |
3bfb1d20 | 628 | |
02a21b79 AS |
629 | /* Check if we have any interrupt from the DMAC which is not in use */ |
630 | if (!dw->in_use) | |
631 | return IRQ_NONE; | |
632 | ||
633 | status = dma_readl(dw, STATUS_INT); | |
3783cef8 AS |
634 | dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); |
635 | ||
636 | /* Check if we have any interrupt from the DMAC */ | |
02a21b79 | 637 | if (!status) |
3783cef8 | 638 | return IRQ_NONE; |
3bfb1d20 HS |
639 | |
640 | /* | |
641 | * Just disable the interrupts. We'll turn them back on in the | |
642 | * softirq handler. | |
643 | */ | |
644 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | |
2895b2ca | 645 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
3bfb1d20 HS |
646 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
647 | ||
648 | status = dma_readl(dw, STATUS_INT); | |
649 | if (status) { | |
650 | dev_err(dw->dma.dev, | |
651 | "BUG: Unexpected interrupts pending: 0x%x\n", | |
652 | status); | |
653 | ||
654 | /* Try to recover */ | |
655 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); | |
2895b2ca | 656 | channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); |
3bfb1d20 HS |
657 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); |
658 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); | |
659 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); | |
660 | } | |
661 | ||
662 | tasklet_schedule(&dw->tasklet); | |
663 | ||
664 | return IRQ_HANDLED; | |
665 | } | |
666 | ||
667 | /*----------------------------------------------------------------------*/ | |
668 | ||
669 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |
670 | { | |
671 | struct dw_desc *desc = txd_to_dw_desc(tx); | |
672 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); | |
673 | dma_cookie_t cookie; | |
69cea5a0 | 674 | unsigned long flags; |
3bfb1d20 | 675 | |
69cea5a0 | 676 | spin_lock_irqsave(&dwc->lock, flags); |
884485e1 | 677 | cookie = dma_cookie_assign(tx); |
3bfb1d20 HS |
678 | |
679 | /* | |
680 | * REVISIT: We should attempt to chain as many descriptors as | |
681 | * possible, perhaps even appending to those already submitted | |
682 | * for DMA. But this is hard to do in a race-free manner. | |
683 | */ | |
3bfb1d20 | 684 | |
dd8ecfca AS |
685 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); |
686 | list_add_tail(&desc->desc_node, &dwc->queue); | |
3bfb1d20 | 687 | |
69cea5a0 | 688 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
689 | |
690 | return cookie; | |
691 | } | |
692 | ||
693 | static struct dma_async_tx_descriptor * | |
694 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |
695 | size_t len, unsigned long flags) | |
696 | { | |
697 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
f776076b | 698 | struct dw_dma *dw = to_dw_dma(chan->device); |
3bfb1d20 HS |
699 | struct dw_desc *desc; |
700 | struct dw_desc *first; | |
701 | struct dw_desc *prev; | |
702 | size_t xfer_count; | |
703 | size_t offset; | |
704 | unsigned int src_width; | |
705 | unsigned int dst_width; | |
3d4f8605 | 706 | unsigned int data_width; |
3bfb1d20 | 707 | u32 ctllo; |
2a0fae02 | 708 | u8 lms = DWC_LLP_LMS(dwc->m_master); |
3bfb1d20 | 709 | |
2f45d613 | 710 | dev_vdbg(chan2dev(chan), |
5a87f0e6 AS |
711 | "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, |
712 | &dest, &src, len, flags); | |
3bfb1d20 HS |
713 | |
714 | if (unlikely(!len)) { | |
2e4c364e | 715 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); |
3bfb1d20 HS |
716 | return NULL; |
717 | } | |
718 | ||
0fdb567f AS |
719 | dwc->direction = DMA_MEM_TO_MEM; |
720 | ||
c422025c | 721 | data_width = dw->data_width[dwc->m_master]; |
a0982004 | 722 | |
3d4f8605 | 723 | src_width = dst_width = min_t(unsigned int, data_width, |
39416677 | 724 | dwc_fast_ffs(src | dest | len)); |
3bfb1d20 | 725 | |
327e6970 | 726 | ctllo = DWC_DEFAULT_CTLLO(chan) |
3bfb1d20 HS |
727 | | DWC_CTLL_DST_WIDTH(dst_width) |
728 | | DWC_CTLL_SRC_WIDTH(src_width) | |
729 | | DWC_CTLL_DST_INC | |
730 | | DWC_CTLL_SRC_INC | |
731 | | DWC_CTLL_FC_M2M; | |
732 | prev = first = NULL; | |
733 | ||
734 | for (offset = 0; offset < len; offset += xfer_count << src_width) { | |
735 | xfer_count = min_t(size_t, (len - offset) >> src_width, | |
4a63a8b3 | 736 | dwc->block_size); |
3bfb1d20 HS |
737 | |
738 | desc = dwc_desc_get(dwc); | |
739 | if (!desc) | |
740 | goto err_desc_get; | |
741 | ||
df1f3a23 MR |
742 | lli_write(desc, sar, src + offset); |
743 | lli_write(desc, dar, dest + offset); | |
744 | lli_write(desc, ctllo, ctllo); | |
745 | lli_write(desc, ctlhi, xfer_count); | |
176dcec5 | 746 | desc->len = xfer_count << src_width; |
3bfb1d20 HS |
747 | |
748 | if (!first) { | |
749 | first = desc; | |
750 | } else { | |
2a0fae02 | 751 | lli_write(prev, llp, desc->txd.phys | lms); |
df1f3a23 | 752 | list_add_tail(&desc->desc_node, &first->tx_list); |
3bfb1d20 HS |
753 | } |
754 | prev = desc; | |
755 | } | |
756 | ||
3bfb1d20 HS |
757 | if (flags & DMA_PREP_INTERRUPT) |
758 | /* Trigger interrupt after last block */ | |
df1f3a23 | 759 | lli_set(prev, ctllo, DWC_CTLL_INT_EN); |
3bfb1d20 HS |
760 | |
761 | prev->lli.llp = 0; | |
a3e55799 | 762 | lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
3bfb1d20 | 763 | first->txd.flags = flags; |
30d38a32 | 764 | first->total_len = len; |
3bfb1d20 HS |
765 | |
766 | return &first->txd; | |
767 | ||
768 | err_desc_get: | |
769 | dwc_desc_put(dwc, first); | |
770 | return NULL; | |
771 | } | |
772 | ||
773 | static struct dma_async_tx_descriptor * | |
774 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |
db8196df | 775 | unsigned int sg_len, enum dma_transfer_direction direction, |
185ecb5f | 776 | unsigned long flags, void *context) |
3bfb1d20 HS |
777 | { |
778 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
f776076b | 779 | struct dw_dma *dw = to_dw_dma(chan->device); |
327e6970 | 780 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
3bfb1d20 HS |
781 | struct dw_desc *prev; |
782 | struct dw_desc *first; | |
783 | u32 ctllo; | |
2a0fae02 | 784 | u8 lms = DWC_LLP_LMS(dwc->m_master); |
3bfb1d20 HS |
785 | dma_addr_t reg; |
786 | unsigned int reg_width; | |
787 | unsigned int mem_width; | |
a0982004 | 788 | unsigned int data_width; |
3bfb1d20 HS |
789 | unsigned int i; |
790 | struct scatterlist *sg; | |
791 | size_t total_len = 0; | |
792 | ||
2e4c364e | 793 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
3bfb1d20 | 794 | |
495aea4b | 795 | if (unlikely(!is_slave_direction(direction) || !sg_len)) |
3bfb1d20 HS |
796 | return NULL; |
797 | ||
0fdb567f AS |
798 | dwc->direction = direction; |
799 | ||
3bfb1d20 HS |
800 | prev = first = NULL; |
801 | ||
3bfb1d20 | 802 | switch (direction) { |
db8196df | 803 | case DMA_MEM_TO_DEV: |
39416677 | 804 | reg_width = __ffs(sconfig->dst_addr_width); |
327e6970 VK |
805 | reg = sconfig->dst_addr; |
806 | ctllo = (DWC_DEFAULT_CTLLO(chan) | |
3bfb1d20 HS |
807 | | DWC_CTLL_DST_WIDTH(reg_width) |
808 | | DWC_CTLL_DST_FIX | |
327e6970 VK |
809 | | DWC_CTLL_SRC_INC); |
810 | ||
811 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | |
812 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | |
813 | ||
c422025c | 814 | data_width = dw->data_width[dwc->m_master]; |
a0982004 | 815 | |
3bfb1d20 HS |
816 | for_each_sg(sgl, sg, sg_len, i) { |
817 | struct dw_desc *desc; | |
69dc14b5 | 818 | u32 len, dlen, mem; |
3bfb1d20 | 819 | |
cbb796cc | 820 | mem = sg_dma_address(sg); |
69dc14b5 | 821 | len = sg_dma_len(sg); |
6bc711f6 | 822 | |
a0982004 | 823 | mem_width = min_t(unsigned int, |
39416677 | 824 | data_width, dwc_fast_ffs(mem | len)); |
3bfb1d20 | 825 | |
69dc14b5 | 826 | slave_sg_todev_fill_desc: |
3bfb1d20 | 827 | desc = dwc_desc_get(dwc); |
b2607227 | 828 | if (!desc) |
3bfb1d20 | 829 | goto err_desc_get; |
3bfb1d20 | 830 | |
df1f3a23 MR |
831 | lli_write(desc, sar, mem); |
832 | lli_write(desc, dar, reg); | |
833 | lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); | |
4a63a8b3 AS |
834 | if ((len >> mem_width) > dwc->block_size) { |
835 | dlen = dwc->block_size << mem_width; | |
69dc14b5 VK |
836 | mem += dlen; |
837 | len -= dlen; | |
838 | } else { | |
839 | dlen = len; | |
840 | len = 0; | |
841 | } | |
842 | ||
df1f3a23 | 843 | lli_write(desc, ctlhi, dlen >> mem_width); |
176dcec5 | 844 | desc->len = dlen; |
3bfb1d20 HS |
845 | |
846 | if (!first) { | |
847 | first = desc; | |
848 | } else { | |
2a0fae02 | 849 | lli_write(prev, llp, desc->txd.phys | lms); |
df1f3a23 | 850 | list_add_tail(&desc->desc_node, &first->tx_list); |
3bfb1d20 HS |
851 | } |
852 | prev = desc; | |
69dc14b5 VK |
853 | total_len += dlen; |
854 | ||
855 | if (len) | |
856 | goto slave_sg_todev_fill_desc; | |
3bfb1d20 HS |
857 | } |
858 | break; | |
db8196df | 859 | case DMA_DEV_TO_MEM: |
39416677 | 860 | reg_width = __ffs(sconfig->src_addr_width); |
327e6970 VK |
861 | reg = sconfig->src_addr; |
862 | ctllo = (DWC_DEFAULT_CTLLO(chan) | |
3bfb1d20 HS |
863 | | DWC_CTLL_SRC_WIDTH(reg_width) |
864 | | DWC_CTLL_DST_INC | |
327e6970 VK |
865 | | DWC_CTLL_SRC_FIX); |
866 | ||
867 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | |
868 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | |
3bfb1d20 | 869 | |
c422025c | 870 | data_width = dw->data_width[dwc->m_master]; |
a0982004 | 871 | |
3bfb1d20 HS |
872 | for_each_sg(sgl, sg, sg_len, i) { |
873 | struct dw_desc *desc; | |
69dc14b5 | 874 | u32 len, dlen, mem; |
3bfb1d20 | 875 | |
cbb796cc | 876 | mem = sg_dma_address(sg); |
3bfb1d20 | 877 | len = sg_dma_len(sg); |
6bc711f6 | 878 | |
a0982004 | 879 | mem_width = min_t(unsigned int, |
39416677 | 880 | data_width, dwc_fast_ffs(mem | len)); |
3bfb1d20 | 881 | |
69dc14b5 VK |
882 | slave_sg_fromdev_fill_desc: |
883 | desc = dwc_desc_get(dwc); | |
b2607227 | 884 | if (!desc) |
69dc14b5 | 885 | goto err_desc_get; |
69dc14b5 | 886 | |
df1f3a23 MR |
887 | lli_write(desc, sar, reg); |
888 | lli_write(desc, dar, mem); | |
889 | lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); | |
4a63a8b3 AS |
890 | if ((len >> reg_width) > dwc->block_size) { |
891 | dlen = dwc->block_size << reg_width; | |
69dc14b5 VK |
892 | mem += dlen; |
893 | len -= dlen; | |
894 | } else { | |
895 | dlen = len; | |
896 | len = 0; | |
897 | } | |
df1f3a23 | 898 | lli_write(desc, ctlhi, dlen >> reg_width); |
176dcec5 | 899 | desc->len = dlen; |
3bfb1d20 HS |
900 | |
901 | if (!first) { | |
902 | first = desc; | |
903 | } else { | |
2a0fae02 | 904 | lli_write(prev, llp, desc->txd.phys | lms); |
df1f3a23 | 905 | list_add_tail(&desc->desc_node, &first->tx_list); |
3bfb1d20 HS |
906 | } |
907 | prev = desc; | |
69dc14b5 VK |
908 | total_len += dlen; |
909 | ||
910 | if (len) | |
911 | goto slave_sg_fromdev_fill_desc; | |
3bfb1d20 HS |
912 | } |
913 | break; | |
914 | default: | |
915 | return NULL; | |
916 | } | |
917 | ||
918 | if (flags & DMA_PREP_INTERRUPT) | |
919 | /* Trigger interrupt after last block */ | |
df1f3a23 | 920 | lli_set(prev, ctllo, DWC_CTLL_INT_EN); |
3bfb1d20 HS |
921 | |
922 | prev->lli.llp = 0; | |
a3e55799 | 923 | lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
30d38a32 | 924 | first->total_len = total_len; |
3bfb1d20 HS |
925 | |
926 | return &first->txd; | |
927 | ||
928 | err_desc_get: | |
b2607227 JN |
929 | dev_err(chan2dev(chan), |
930 | "not enough descriptors available. Direction %d\n", direction); | |
3bfb1d20 HS |
931 | dwc_desc_put(dwc, first); |
932 | return NULL; | |
933 | } | |
934 | ||
4d130de2 AS |
935 | bool dw_dma_filter(struct dma_chan *chan, void *param) |
936 | { | |
937 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
938 | struct dw_dma_slave *dws = param; | |
939 | ||
3fe6409c | 940 | if (dws->dma_dev != chan->device->dev) |
4d130de2 AS |
941 | return false; |
942 | ||
943 | /* We have to copy data since dws can be temporary storage */ | |
944 | ||
945 | dwc->src_id = dws->src_id; | |
946 | dwc->dst_id = dws->dst_id; | |
947 | ||
c422025c AS |
948 | dwc->m_master = dws->m_master; |
949 | dwc->p_master = dws->p_master; | |
4d130de2 AS |
950 | |
951 | return true; | |
952 | } | |
953 | EXPORT_SYMBOL_GPL(dw_dma_filter); | |
954 | ||
327e6970 VK |
955 | /* |
956 | * Fix sconfig's burst size according to dw_dmac. We need to convert them as: | |
957 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | |
958 | * | |
959 | * NOTE: burst size 2 is not supported by controller. | |
960 | * | |
961 | * This can be done by finding least significant bit set: n & (n - 1) | |
962 | */ | |
963 | static inline void convert_burst(u32 *maxburst) | |
964 | { | |
965 | if (*maxburst > 1) | |
966 | *maxburst = fls(*maxburst) - 2; | |
967 | else | |
968 | *maxburst = 0; | |
969 | } | |
970 | ||
a4b0d348 | 971 | static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) |
327e6970 VK |
972 | { |
973 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
974 | ||
495aea4b AS |
975 | /* Check if chan will be configured for slave transfers */ |
976 | if (!is_slave_direction(sconfig->direction)) | |
327e6970 VK |
977 | return -EINVAL; |
978 | ||
979 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | |
0fdb567f | 980 | dwc->direction = sconfig->direction; |
327e6970 VK |
981 | |
982 | convert_burst(&dwc->dma_sconfig.src_maxburst); | |
983 | convert_burst(&dwc->dma_sconfig.dst_maxburst); | |
984 | ||
985 | return 0; | |
986 | } | |
987 | ||
a4b0d348 | 988 | static int dwc_pause(struct dma_chan *chan) |
21fe3c52 | 989 | { |
a4b0d348 MR |
990 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
991 | unsigned long flags; | |
992 | unsigned int count = 20; /* timeout iterations */ | |
993 | u32 cfglo; | |
994 | ||
995 | spin_lock_irqsave(&dwc->lock, flags); | |
21fe3c52 | 996 | |
a4b0d348 | 997 | cfglo = channel_readl(dwc, CFG_LO); |
21fe3c52 | 998 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); |
123b69ab AS |
999 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) |
1000 | udelay(2); | |
21fe3c52 AS |
1001 | |
1002 | dwc->paused = true; | |
a4b0d348 MR |
1003 | |
1004 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1005 | ||
1006 | return 0; | |
21fe3c52 AS |
1007 | } |
1008 | ||
1009 | static inline void dwc_chan_resume(struct dw_dma_chan *dwc) | |
1010 | { | |
1011 | u32 cfglo = channel_readl(dwc, CFG_LO); | |
1012 | ||
1013 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | |
1014 | ||
1015 | dwc->paused = false; | |
1016 | } | |
1017 | ||
a4b0d348 | 1018 | static int dwc_resume(struct dma_chan *chan) |
3bfb1d20 HS |
1019 | { |
1020 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
69cea5a0 | 1021 | unsigned long flags; |
3bfb1d20 | 1022 | |
a4b0d348 MR |
1023 | if (!dwc->paused) |
1024 | return 0; | |
c3635c78 | 1025 | |
a4b0d348 | 1026 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 | 1027 | |
a4b0d348 | 1028 | dwc_chan_resume(dwc); |
3bfb1d20 | 1029 | |
a4b0d348 | 1030 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 1031 | |
a4b0d348 MR |
1032 | return 0; |
1033 | } | |
3bfb1d20 | 1034 | |
a4b0d348 MR |
1035 | static int dwc_terminate_all(struct dma_chan *chan) |
1036 | { | |
1037 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1038 | struct dw_dma *dw = to_dw_dma(chan->device); | |
1039 | struct dw_desc *desc, *_desc; | |
1040 | unsigned long flags; | |
1041 | LIST_HEAD(list); | |
3bfb1d20 | 1042 | |
a4b0d348 | 1043 | spin_lock_irqsave(&dwc->lock, flags); |
fed2574b | 1044 | |
a4b0d348 | 1045 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); |
fed2574b | 1046 | |
a4b0d348 | 1047 | dwc_chan_disable(dw, dwc); |
a7c57cf7 | 1048 | |
a4b0d348 | 1049 | dwc_chan_resume(dwc); |
a7c57cf7 | 1050 | |
a4b0d348 MR |
1051 | /* active_list entries will end up before queued entries */ |
1052 | list_splice_init(&dwc->queue, &list); | |
1053 | list_splice_init(&dwc->active_list, &list); | |
a7c57cf7 | 1054 | |
a4b0d348 | 1055 | spin_unlock_irqrestore(&dwc->lock, flags); |
a7c57cf7 | 1056 | |
a4b0d348 MR |
1057 | /* Flush all pending and queued descriptors */ |
1058 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | |
1059 | dwc_descriptor_complete(dwc, desc, false); | |
c3635c78 LW |
1060 | |
1061 | return 0; | |
3bfb1d20 HS |
1062 | } |
1063 | ||
4702d524 AS |
1064 | static inline u32 dwc_get_residue(struct dw_dma_chan *dwc) |
1065 | { | |
1066 | unsigned long flags; | |
1067 | u32 residue; | |
1068 | ||
1069 | spin_lock_irqsave(&dwc->lock, flags); | |
1070 | ||
1071 | residue = dwc->residue; | |
1072 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) | |
1073 | residue -= dwc_get_sent(dwc); | |
1074 | ||
1075 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1076 | return residue; | |
1077 | } | |
1078 | ||
3bfb1d20 | 1079 | static enum dma_status |
07934481 LW |
1080 | dwc_tx_status(struct dma_chan *chan, |
1081 | dma_cookie_t cookie, | |
1082 | struct dma_tx_state *txstate) | |
3bfb1d20 HS |
1083 | { |
1084 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
96a2af41 | 1085 | enum dma_status ret; |
3bfb1d20 | 1086 | |
96a2af41 | 1087 | ret = dma_cookie_status(chan, cookie, txstate); |
2c40410b | 1088 | if (ret == DMA_COMPLETE) |
12381dc0 | 1089 | return ret; |
3bfb1d20 | 1090 | |
12381dc0 | 1091 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
3bfb1d20 | 1092 | |
12381dc0 | 1093 | ret = dma_cookie_status(chan, cookie, txstate); |
2c40410b | 1094 | if (ret != DMA_COMPLETE) |
4702d524 | 1095 | dma_set_residue(txstate, dwc_get_residue(dwc)); |
3bfb1d20 | 1096 | |
effd5cf6 | 1097 | if (dwc->paused && ret == DMA_IN_PROGRESS) |
a7c57cf7 | 1098 | return DMA_PAUSED; |
3bfb1d20 HS |
1099 | |
1100 | return ret; | |
1101 | } | |
1102 | ||
1103 | static void dwc_issue_pending(struct dma_chan *chan) | |
1104 | { | |
1105 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
dd8ecfca | 1106 | unsigned long flags; |
3bfb1d20 | 1107 | |
dd8ecfca AS |
1108 | spin_lock_irqsave(&dwc->lock, flags); |
1109 | if (list_empty(&dwc->active_list)) | |
1110 | dwc_dostart_first_queued(dwc); | |
1111 | spin_unlock_irqrestore(&dwc->lock, flags); | |
3bfb1d20 HS |
1112 | } |
1113 | ||
99d9bf4e AS |
1114 | /*----------------------------------------------------------------------*/ |
1115 | ||
1116 | static void dw_dma_off(struct dw_dma *dw) | |
1117 | { | |
1118 | int i; | |
1119 | ||
1120 | dma_writel(dw, CFG, 0); | |
1121 | ||
1122 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | |
2895b2ca | 1123 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
99d9bf4e AS |
1124 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1125 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | |
1126 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | |
1127 | ||
1128 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | |
1129 | cpu_relax(); | |
1130 | ||
1131 | for (i = 0; i < dw->dma.chancnt; i++) | |
1132 | dw->chan[i].initialized = false; | |
1133 | } | |
1134 | ||
1135 | static void dw_dma_on(struct dw_dma *dw) | |
1136 | { | |
1137 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | |
1138 | } | |
1139 | ||
aa1e6f1a | 1140 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
3bfb1d20 HS |
1141 | { |
1142 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1143 | struct dw_dma *dw = to_dw_dma(chan->device); | |
1144 | struct dw_desc *desc; | |
3bfb1d20 | 1145 | int i; |
69cea5a0 | 1146 | unsigned long flags; |
3bfb1d20 | 1147 | |
2e4c364e | 1148 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
3bfb1d20 | 1149 | |
3bfb1d20 HS |
1150 | /* ASSERT: channel is idle */ |
1151 | if (dma_readl(dw, CH_EN) & dwc->mask) { | |
41d5e59c | 1152 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); |
3bfb1d20 HS |
1153 | return -EIO; |
1154 | } | |
1155 | ||
d3ee98cd | 1156 | dma_cookie_init(chan); |
3bfb1d20 | 1157 | |
3bfb1d20 HS |
1158 | /* |
1159 | * NOTE: some controllers may have additional features that we | |
1160 | * need to initialize here, like "scatter-gather" (which | |
1161 | * doesn't mean what you think it means), and status writeback. | |
1162 | */ | |
1163 | ||
3fe6409c AS |
1164 | /* |
1165 | * We need controller-specific data to set up slave transfers. | |
1166 | */ | |
1167 | if (chan->private && !dw_dma_filter(chan, chan->private)) { | |
1168 | dev_warn(chan2dev(chan), "Wrong controller-specific data\n"); | |
1169 | return -EINVAL; | |
1170 | } | |
1171 | ||
99d9bf4e AS |
1172 | /* Enable controller here if needed */ |
1173 | if (!dw->in_use) | |
1174 | dw_dma_on(dw); | |
1175 | dw->in_use |= dwc->mask; | |
1176 | ||
69cea5a0 | 1177 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1178 | i = dwc->descs_allocated; |
1179 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | |
f8122a82 AS |
1180 | dma_addr_t phys; |
1181 | ||
69cea5a0 | 1182 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 1183 | |
f8122a82 | 1184 | desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); |
cbd65312 AS |
1185 | if (!desc) |
1186 | goto err_desc_alloc; | |
3bfb1d20 | 1187 | |
f8122a82 | 1188 | memset(desc, 0, sizeof(struct dw_desc)); |
3bfb1d20 | 1189 | |
e0bd0f8c | 1190 | INIT_LIST_HEAD(&desc->tx_list); |
3bfb1d20 HS |
1191 | dma_async_tx_descriptor_init(&desc->txd, chan); |
1192 | desc->txd.tx_submit = dwc_tx_submit; | |
1193 | desc->txd.flags = DMA_CTRL_ACK; | |
f8122a82 | 1194 | desc->txd.phys = phys; |
cbd65312 | 1195 | |
3bfb1d20 HS |
1196 | dwc_desc_put(dwc, desc); |
1197 | ||
69cea5a0 | 1198 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1199 | i = ++dwc->descs_allocated; |
1200 | } | |
1201 | ||
69cea5a0 | 1202 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 1203 | |
2e4c364e | 1204 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); |
3bfb1d20 | 1205 | |
cbd65312 AS |
1206 | return i; |
1207 | ||
1208 | err_desc_alloc: | |
cbd65312 AS |
1209 | dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); |
1210 | ||
3bfb1d20 HS |
1211 | return i; |
1212 | } | |
1213 | ||
1214 | static void dwc_free_chan_resources(struct dma_chan *chan) | |
1215 | { | |
1216 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1217 | struct dw_dma *dw = to_dw_dma(chan->device); | |
1218 | struct dw_desc *desc, *_desc; | |
69cea5a0 | 1219 | unsigned long flags; |
3bfb1d20 HS |
1220 | LIST_HEAD(list); |
1221 | ||
2e4c364e | 1222 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, |
3bfb1d20 HS |
1223 | dwc->descs_allocated); |
1224 | ||
1225 | /* ASSERT: channel is idle */ | |
1226 | BUG_ON(!list_empty(&dwc->active_list)); | |
1227 | BUG_ON(!list_empty(&dwc->queue)); | |
1228 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); | |
1229 | ||
69cea5a0 | 1230 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1231 | list_splice_init(&dwc->free_list, &list); |
1232 | dwc->descs_allocated = 0; | |
3fe6409c AS |
1233 | |
1234 | /* Clear custom channel configuration */ | |
1235 | dwc->src_id = 0; | |
1236 | dwc->dst_id = 0; | |
1237 | ||
c422025c AS |
1238 | dwc->m_master = 0; |
1239 | dwc->p_master = 0; | |
3fe6409c | 1240 | |
61e183f8 | 1241 | dwc->initialized = false; |
3bfb1d20 HS |
1242 | |
1243 | /* Disable interrupts */ | |
1244 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | |
2895b2ca | 1245 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); |
3bfb1d20 HS |
1246 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); |
1247 | ||
69cea5a0 | 1248 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 1249 | |
99d9bf4e AS |
1250 | /* Disable controller in case it was a last user */ |
1251 | dw->in_use &= ~dwc->mask; | |
1252 | if (!dw->in_use) | |
1253 | dw_dma_off(dw); | |
1254 | ||
3bfb1d20 | 1255 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { |
41d5e59c | 1256 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
f8122a82 | 1257 | dma_pool_free(dw->desc_pool, desc, desc->txd.phys); |
3bfb1d20 HS |
1258 | } |
1259 | ||
2e4c364e | 1260 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
3bfb1d20 HS |
1261 | } |
1262 | ||
d9de4519 HCE |
1263 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
1264 | ||
1265 | /** | |
1266 | * dw_dma_cyclic_start - start the cyclic DMA transfer | |
1267 | * @chan: the DMA channel to start | |
1268 | * | |
1269 | * Must be called with soft interrupts disabled. Returns zero on success or | |
1270 | * -errno on failure. | |
1271 | */ | |
1272 | int dw_dma_cyclic_start(struct dma_chan *chan) | |
1273 | { | |
1274 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
ee1cdcda | 1275 | struct dw_dma *dw = to_dw_dma(chan->device); |
69cea5a0 | 1276 | unsigned long flags; |
d9de4519 HCE |
1277 | |
1278 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { | |
1279 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); | |
1280 | return -ENODEV; | |
1281 | } | |
1282 | ||
69cea5a0 | 1283 | spin_lock_irqsave(&dwc->lock, flags); |
ee1cdcda AS |
1284 | |
1285 | /* Enable interrupts to perform cyclic transfer */ | |
1286 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); | |
1287 | ||
df3bb8a0 | 1288 | dwc_dostart(dwc, dwc->cdesc->desc[0]); |
ee1cdcda | 1289 | |
69cea5a0 | 1290 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1291 | |
1292 | return 0; | |
1293 | } | |
1294 | EXPORT_SYMBOL(dw_dma_cyclic_start); | |
1295 | ||
1296 | /** | |
1297 | * dw_dma_cyclic_stop - stop the cyclic DMA transfer | |
1298 | * @chan: the DMA channel to stop | |
1299 | * | |
1300 | * Must be called with soft interrupts disabled. | |
1301 | */ | |
1302 | void dw_dma_cyclic_stop(struct dma_chan *chan) | |
1303 | { | |
1304 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1305 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
69cea5a0 | 1306 | unsigned long flags; |
d9de4519 | 1307 | |
69cea5a0 | 1308 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1309 | |
3f936207 | 1310 | dwc_chan_disable(dw, dwc); |
d9de4519 | 1311 | |
69cea5a0 | 1312 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1313 | } |
1314 | EXPORT_SYMBOL(dw_dma_cyclic_stop); | |
1315 | ||
1316 | /** | |
1317 | * dw_dma_cyclic_prep - prepare the cyclic DMA transfer | |
1318 | * @chan: the DMA channel to prepare | |
1319 | * @buf_addr: physical DMA address where the buffer starts | |
1320 | * @buf_len: total number of bytes for the entire buffer | |
1321 | * @period_len: number of bytes for each period | |
1322 | * @direction: transfer direction, to or from device | |
1323 | * | |
1324 | * Must be called before trying to start the transfer. Returns a valid struct | |
1325 | * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. | |
1326 | */ | |
1327 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |
1328 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | |
db8196df | 1329 | enum dma_transfer_direction direction) |
d9de4519 HCE |
1330 | { |
1331 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
327e6970 | 1332 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
d9de4519 HCE |
1333 | struct dw_cyclic_desc *cdesc; |
1334 | struct dw_cyclic_desc *retval = NULL; | |
1335 | struct dw_desc *desc; | |
1336 | struct dw_desc *last = NULL; | |
2a0fae02 | 1337 | u8 lms = DWC_LLP_LMS(dwc->m_master); |
d9de4519 HCE |
1338 | unsigned long was_cyclic; |
1339 | unsigned int reg_width; | |
1340 | unsigned int periods; | |
1341 | unsigned int i; | |
69cea5a0 | 1342 | unsigned long flags; |
d9de4519 | 1343 | |
69cea5a0 | 1344 | spin_lock_irqsave(&dwc->lock, flags); |
fed2574b AS |
1345 | if (dwc->nollp) { |
1346 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1347 | dev_dbg(chan2dev(&dwc->chan), | |
1348 | "channel doesn't support LLP transfers\n"); | |
1349 | return ERR_PTR(-EINVAL); | |
1350 | } | |
1351 | ||
d9de4519 | 1352 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
69cea5a0 | 1353 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1354 | dev_dbg(chan2dev(&dwc->chan), |
1355 | "queue and/or active list are not empty\n"); | |
1356 | return ERR_PTR(-EBUSY); | |
1357 | } | |
1358 | ||
1359 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
69cea5a0 | 1360 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1361 | if (was_cyclic) { |
1362 | dev_dbg(chan2dev(&dwc->chan), | |
1363 | "channel already prepared for cyclic DMA\n"); | |
1364 | return ERR_PTR(-EBUSY); | |
1365 | } | |
1366 | ||
1367 | retval = ERR_PTR(-EINVAL); | |
327e6970 | 1368 | |
f44b92f4 AS |
1369 | if (unlikely(!is_slave_direction(direction))) |
1370 | goto out_err; | |
1371 | ||
0fdb567f AS |
1372 | dwc->direction = direction; |
1373 | ||
327e6970 VK |
1374 | if (direction == DMA_MEM_TO_DEV) |
1375 | reg_width = __ffs(sconfig->dst_addr_width); | |
1376 | else | |
1377 | reg_width = __ffs(sconfig->src_addr_width); | |
1378 | ||
d9de4519 HCE |
1379 | periods = buf_len / period_len; |
1380 | ||
1381 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ | |
4a63a8b3 | 1382 | if (period_len > (dwc->block_size << reg_width)) |
d9de4519 HCE |
1383 | goto out_err; |
1384 | if (unlikely(period_len & ((1 << reg_width) - 1))) | |
1385 | goto out_err; | |
1386 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | |
1387 | goto out_err; | |
d9de4519 HCE |
1388 | |
1389 | retval = ERR_PTR(-ENOMEM); | |
1390 | ||
1391 | if (periods > NR_DESCS_PER_CHANNEL) | |
1392 | goto out_err; | |
1393 | ||
1394 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); | |
1395 | if (!cdesc) | |
1396 | goto out_err; | |
1397 | ||
1398 | cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); | |
1399 | if (!cdesc->desc) | |
1400 | goto out_err_alloc; | |
1401 | ||
1402 | for (i = 0; i < periods; i++) { | |
1403 | desc = dwc_desc_get(dwc); | |
1404 | if (!desc) | |
1405 | goto out_err_desc_get; | |
1406 | ||
1407 | switch (direction) { | |
db8196df | 1408 | case DMA_MEM_TO_DEV: |
df1f3a23 MR |
1409 | lli_write(desc, dar, sconfig->dst_addr); |
1410 | lli_write(desc, sar, buf_addr + period_len * i); | |
1411 | lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan) | |
1412 | | DWC_CTLL_DST_WIDTH(reg_width) | |
1413 | | DWC_CTLL_SRC_WIDTH(reg_width) | |
1414 | | DWC_CTLL_DST_FIX | |
1415 | | DWC_CTLL_SRC_INC | |
1416 | | DWC_CTLL_INT_EN)); | |
1417 | ||
1418 | lli_set(desc, ctllo, sconfig->device_fc ? | |
1419 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | |
1420 | DWC_CTLL_FC(DW_DMA_FC_D_M2P)); | |
327e6970 | 1421 | |
d9de4519 | 1422 | break; |
db8196df | 1423 | case DMA_DEV_TO_MEM: |
df1f3a23 MR |
1424 | lli_write(desc, dar, buf_addr + period_len * i); |
1425 | lli_write(desc, sar, sconfig->src_addr); | |
1426 | lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan) | |
1427 | | DWC_CTLL_SRC_WIDTH(reg_width) | |
1428 | | DWC_CTLL_DST_WIDTH(reg_width) | |
1429 | | DWC_CTLL_DST_INC | |
1430 | | DWC_CTLL_SRC_FIX | |
1431 | | DWC_CTLL_INT_EN)); | |
1432 | ||
1433 | lli_set(desc, ctllo, sconfig->device_fc ? | |
1434 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | |
1435 | DWC_CTLL_FC(DW_DMA_FC_D_P2M)); | |
327e6970 | 1436 | |
d9de4519 HCE |
1437 | break; |
1438 | default: | |
1439 | break; | |
1440 | } | |
1441 | ||
df1f3a23 | 1442 | lli_write(desc, ctlhi, period_len >> reg_width); |
d9de4519 HCE |
1443 | cdesc->desc[i] = desc; |
1444 | ||
f8122a82 | 1445 | if (last) |
2a0fae02 | 1446 | lli_write(last, llp, desc->txd.phys | lms); |
d9de4519 HCE |
1447 | |
1448 | last = desc; | |
1449 | } | |
1450 | ||
75c61225 | 1451 | /* Let's make a cyclic list */ |
2a0fae02 | 1452 | lli_write(last, llp, cdesc->desc[0]->txd.phys | lms); |
d9de4519 | 1453 | |
5a87f0e6 AS |
1454 | dev_dbg(chan2dev(&dwc->chan), |
1455 | "cyclic prepared buf %pad len %zu period %zu periods %d\n", | |
1456 | &buf_addr, buf_len, period_len, periods); | |
d9de4519 HCE |
1457 | |
1458 | cdesc->periods = periods; | |
1459 | dwc->cdesc = cdesc; | |
1460 | ||
1461 | return cdesc; | |
1462 | ||
1463 | out_err_desc_get: | |
1464 | while (i--) | |
1465 | dwc_desc_put(dwc, cdesc->desc[i]); | |
1466 | out_err_alloc: | |
1467 | kfree(cdesc); | |
1468 | out_err: | |
1469 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
1470 | return (struct dw_cyclic_desc *)retval; | |
1471 | } | |
1472 | EXPORT_SYMBOL(dw_dma_cyclic_prep); | |
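/*
 * Illustration only (not part of the original file): a minimal sketch of how
 * a client could drive the exported cyclic API above. The channel is assumed
 * to have been requested and configured via dmaengine_slave_config() already;
 * dw_dma_cyclic_start() is the companion helper defined earlier in this
 * driver. Function and variable names here are hypothetical.
 */
static int example_cyclic_setup(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	/* Split buf into buf_len / period_len blocks linked in a ring */
	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_MEM_TO_DEV);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	/* Enable the channel; it keeps cycling until explicitly stopped */
	return dw_dma_cyclic_start(chan);
}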
1473 | ||
1474 | /** | |
1475 | * dw_dma_cyclic_free - free a prepared cyclic DMA transfer | |
1476 | * @chan: the DMA channel to free | |
1477 | */ | |
1478 | void dw_dma_cyclic_free(struct dma_chan *chan) | |
1479 | { | |
1480 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1481 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
1482 | struct dw_cyclic_desc *cdesc = dwc->cdesc; | |
1483 | int i; | |
69cea5a0 | 1484 | unsigned long flags; |
d9de4519 | 1485 | |
2e4c364e | 1486 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); |
d9de4519 HCE |
1487 | |
1488 | if (!cdesc) | |
1489 | return; | |
1490 | ||
69cea5a0 | 1491 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1492 | |
3f936207 | 1493 | dwc_chan_disable(dw, dwc); |
d9de4519 | 1494 | |
2895b2ca | 1495 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
d9de4519 HCE |
1496 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1497 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
1498 | ||
69cea5a0 | 1499 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1500 | |
1501 | for (i = 0; i < cdesc->periods; i++) | |
1502 | dwc_desc_put(dwc, cdesc->desc[i]); | |
1503 | ||
1504 | kfree(cdesc->desc); | |
1505 | kfree(cdesc); | |
1506 | ||
1507 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
1508 | } | |
1509 | EXPORT_SYMBOL(dw_dma_cyclic_free); | |
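/*
 * Illustration only (not part of the original file): the matching teardown
 * for the sketch above, assuming the dw_dma_cyclic_stop() helper defined
 * earlier in this driver. dw_dma_cyclic_free() returns the descriptors to
 * the channel and clears the cyclic flag so the channel can be reused.
 */
static void example_cyclic_teardown(struct dma_chan *chan)
{
	/* Halt the channel first, then release the prepared ring */
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);
}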
1510 | ||
3bfb1d20 HS |
1511 | /*----------------------------------------------------------------------*/ |
1512 | ||
9cade1a4 | 1513 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) |
a9ddb575 | 1514 | { |
3bfb1d20 | 1515 | struct dw_dma *dw; |
30cb2639 | 1516 | bool autocfg = false; |
482c67ea | 1517 | unsigned int dw_params; |
4a63a8b3 | 1518 | unsigned int max_blk_size = 0; |
3bfb1d20 HS |
1519 | int err; |
1520 | int i; | |
1521 | ||
000871ce AS |
1522 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); |
1523 | if (!dw) | |
1524 | return -ENOMEM; | |
1525 | ||
1526 | dw->regs = chip->regs; | |
1527 | chip->dw = dw; | |
1528 | ||
bb32baf7 AS |
1529 | pm_runtime_get_sync(chip->dev); |
1530 | ||
30cb2639 AS |
1531 | if (!pdata) { |
1532 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); | |
1533 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); | |
482c67ea | 1534 | |
30cb2639 AS |
1535 | autocfg = dw_params >> DW_PARAMS_EN & 1; |
1536 | if (!autocfg) { | |
1537 | err = -EINVAL; | |
1538 | goto err_pdata; | |
1539 | } | |
123de543 | 1540 | |
9cade1a4 | 1541 | pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); |
8be4f523 AS |
1542 | if (!pdata) { |
1543 | err = -ENOMEM; | |
1544 | goto err_pdata; | |
1545 | } | |
123de543 | 1546 | |
30cb2639 AS |
1547 | /* Get hardware configuration parameters */ |
1548 | pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; | |
1549 | pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; | |
1550 | for (i = 0; i < pdata->nr_masters; i++) { | |
1551 | pdata->data_width[i] = | |
1552 | (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; | |
1553 | } | |
1554 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); | |
1555 | ||
123de543 AS |
1556 | /* Fill platform data with the default values */ |
1557 | pdata->is_private = true; | |
df5c7386 | 1558 | pdata->is_memcpy = true; |
123de543 AS |
1559 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; |
1560 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | |
30cb2639 | 1561 | } else if (pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { |
8be4f523 AS |
1562 | err = -EINVAL; |
1563 | goto err_pdata; | |
1564 | } | |
123de543 | 1565 | |
30cb2639 | 1566 | dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), |
000871ce | 1567 | GFP_KERNEL); |
8be4f523 AS |
1568 | if (!dw->chan) { |
1569 | err = -ENOMEM; | |
1570 | goto err_pdata; | |
1571 | } | |
3bfb1d20 | 1572 | |
75c61225 | 1573 | /* Get hardware configuration parameters */ |
30cb2639 AS |
1574 | dw->nr_masters = pdata->nr_masters; |
1575 | for (i = 0; i < dw->nr_masters; i++) | |
1576 | dw->data_width[i] = pdata->data_width[i]; | |
a0982004 | 1577 | |
11f932ec | 1578 | /* Calculate all channel mask before DMA setup */ |
30cb2639 | 1579 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
11f932ec | 1580 | |
75c61225 | 1581 | /* Force dma off, just in case */ |
3bfb1d20 HS |
1582 | dw_dma_off(dw); |
1583 | ||
75c61225 | 1584 | /* Create a pool of consistent memory blocks for hardware descriptors */ |
9cade1a4 | 1585 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, |
f8122a82 AS |
1586 | sizeof(struct dw_desc), 4, 0); |
1587 | if (!dw->desc_pool) { | |
9cade1a4 | 1588 | dev_err(chip->dev, "No memory for descriptors dma pool\n"); |
8be4f523 AS |
1589 | err = -ENOMEM; |
1590 | goto err_pdata; | |
f8122a82 AS |
1591 | } |
1592 | ||
3bfb1d20 HS |
1593 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
1594 | ||
97977f75 AS |
1595 | err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, |
1596 | "dw_dmac", dw); | |
1597 | if (err) | |
8be4f523 | 1598 | goto err_pdata; |
97977f75 | 1599 | |
3bfb1d20 | 1600 | INIT_LIST_HEAD(&dw->dma.channels); |
30cb2639 | 1601 | for (i = 0; i < pdata->nr_channels; i++) { |
3bfb1d20 HS |
1602 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1603 | ||
1604 | dwc->chan.device = &dw->dma; | |
d3ee98cd | 1605 | dma_cookie_init(&dwc->chan); |
b0c3130d VK |
1606 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1607 | list_add_tail(&dwc->chan.device_node, | |
1608 | &dw->dma.channels); | |
1609 | else | |
1610 | list_add(&dwc->chan.device_node, &dw->dma.channels); | |
3bfb1d20 | 1611 | |
93317e8e VK |
1612 | /* 7 is highest priority & 0 is lowest. */ |
1613 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | |
30cb2639 | 1614 | dwc->priority = pdata->nr_channels - i - 1; |
93317e8e VK |
1615 | else |
1616 | dwc->priority = i; | |
1617 | ||
3bfb1d20 HS |
1618 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1619 | spin_lock_init(&dwc->lock); | |
1620 | dwc->mask = 1 << i; | |
1621 | ||
1622 | INIT_LIST_HEAD(&dwc->active_list); | |
1623 | INIT_LIST_HEAD(&dwc->queue); | |
1624 | INIT_LIST_HEAD(&dwc->free_list); | |
1625 | ||
1626 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
4a63a8b3 | 1627 | |
0fdb567f | 1628 | dwc->direction = DMA_TRANS_NONE; |
a0982004 | 1629 | |
75c61225 | 1630 | /* Hardware configuration */ |
fed2574b AS |
1631 | if (autocfg) { |
1632 | unsigned int dwc_params; | |
6bea0f6d | 1633 | unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; |
9cade1a4 | 1634 | void __iomem *addr = chip->regs + r * sizeof(u32); |
fed2574b | 1635 | |
9cade1a4 | 1636 | dwc_params = dma_read_byaddr(addr, DWC_PARAMS); |
fed2574b | 1637 | |
9cade1a4 AS |
1638 | dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, |
1639 | dwc_params); | |
985a6c7d | 1640 | |
1d566f11 AS |
1641 | /* |
1642 | * Decode maximum block size for given channel. The | |
4a63a8b3 | 1643 | * stored 4 bit value represents blocks from 0x00 for 3 |
1d566f11 AS |
1644 | * up to 0x0a for 4095. |
1645 | */ | |
4a63a8b3 AS |
1646 | dwc->block_size = |
1647 | (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; | |
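			/*
			 * The decode above maps an encoded 0x0 to
			 * (4 << 0) - 1 = 3 and the maximum encoding 0xa to
			 * (4 << 10) - 1 = 4095 elements per block.
			 */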
fed2574b AS |
1648 | dwc->nollp = |
1649 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; | |
1650 | } else { | |
4a63a8b3 | 1651 | dwc->block_size = pdata->block_size; |
fed2574b AS |
1652 | |
1653 | /* Check if channel supports multi block transfer */ | |
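			/*
			 * Write all ones to LLP and read it back: a channel
			 * without linked-list support has LLP hardwired, so
			 * the location field reads back as zero.
			 */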
2a0fae02 MR |
1654 | channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff)); |
1655 | dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0; | |
fed2574b AS |
1656 | channel_writel(dwc, LLP, 0); |
1657 | } | |
3bfb1d20 HS |
1658 | } |
1659 | ||
11f932ec | 1660 | /* Clear all interrupts on all channels. */ |
3bfb1d20 | 1661 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
236b106f | 1662 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); |
3bfb1d20 HS |
1663 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1664 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | |
1665 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | |
1666 | ||
df5c7386 | 1667 | /* Set capabilities */ |
3bfb1d20 | 1668 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
95ea759e JI |
1669 | if (pdata->is_private) |
1670 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | |
df5c7386 AS |
1671 | if (pdata->is_memcpy) |
1672 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | |
1673 | ||
9cade1a4 | 1674 | dw->dma.dev = chip->dev; |
3bfb1d20 HS |
1675 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
1676 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | |
1677 | ||
1678 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | |
3bfb1d20 | 1679 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; |
029a40e9 | 1680 | |
a4b0d348 MR |
1681 | dw->dma.device_config = dwc_config; |
1682 | dw->dma.device_pause = dwc_pause; | |
1683 | dw->dma.device_resume = dwc_resume; | |
1684 | dw->dma.device_terminate_all = dwc_terminate_all; | |
3bfb1d20 | 1685 | |
07934481 | 1686 | dw->dma.device_tx_status = dwc_tx_status; |
3bfb1d20 HS |
1687 | dw->dma.device_issue_pending = dwc_issue_pending; |
1688 | ||
029a40e9 AS |
1689 | /* DMA capabilities */ |
1690 | dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; | |
1691 | dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; | |
1692 | dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | | |
1693 | BIT(DMA_MEM_TO_MEM); | |
1694 | dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | |
1695 | ||
1222934e AS |
1696 | err = dma_async_device_register(&dw->dma); |
1697 | if (err) | |
1698 | goto err_dma_register; | |
1699 | ||
9cade1a4 | 1700 | dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", |
30cb2639 | 1701 | pdata->nr_channels); |
3bfb1d20 | 1702 | |
bb32baf7 AS |
1703 | pm_runtime_put_sync_suspend(chip->dev); |
1704 | ||
3bfb1d20 | 1705 | return 0; |
8be4f523 | 1706 | |
1222934e AS |
1707 | err_dma_register: |
1708 | free_irq(chip->irq, dw); | |
8be4f523 | 1709 | err_pdata: |
bb32baf7 | 1710 | pm_runtime_put_sync_suspend(chip->dev); |
8be4f523 | 1711 | return err; |
3bfb1d20 | 1712 | } |
9cade1a4 | 1713 | EXPORT_SYMBOL_GPL(dw_dma_probe); |
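/*
 * Illustration only (not part of the original file): a rough sketch of the
 * bus glue expected to call dw_dma_probe(), in the spirit of the existing
 * platform/PCI front ends. It would live in the glue module with its own
 * includes; the function names are hypothetical, resource handling is
 * trimmed, and a real caller would also manage the clock and pass real
 * platform data (or NULL to rely on DW_PARAMS auto-configuration).
 */
static int example_glue_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct resource *mem;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	platform_set_drvdata(pdev, chip);

	/* NULL pdata: let the core detect its layout from DW_PARAMS */
	return dw_dma_probe(chip, NULL);
}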
3bfb1d20 | 1714 | |
9cade1a4 | 1715 | int dw_dma_remove(struct dw_dma_chip *chip) |
3bfb1d20 | 1716 | { |
9cade1a4 | 1717 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1718 | struct dw_dma_chan *dwc, *_dwc; |
3bfb1d20 | 1719 | |
bb32baf7 AS |
1720 | pm_runtime_get_sync(chip->dev); |
1721 | ||
3bfb1d20 HS |
1722 | dw_dma_off(dw); |
1723 | dma_async_device_unregister(&dw->dma); | |
1724 | ||
97977f75 | 1725 | free_irq(chip->irq, dw); |
3bfb1d20 HS |
1726 | tasklet_kill(&dw->tasklet); |
1727 | ||
1728 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | |
1729 | chan.device_node) { | |
1730 | list_del(&dwc->chan.device_node); | |
1731 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
1732 | } | |
1733 | ||
bb32baf7 | 1734 | pm_runtime_put_sync_suspend(chip->dev); |
3bfb1d20 HS |
1735 | return 0; |
1736 | } | |
9cade1a4 | 1737 | EXPORT_SYMBOL_GPL(dw_dma_remove); |
3bfb1d20 | 1738 | |
2540f74b | 1739 | int dw_dma_disable(struct dw_dma_chip *chip) |
3bfb1d20 | 1740 | { |
9cade1a4 | 1741 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1742 | |
6168d567 | 1743 | dw_dma_off(dw); |
3bfb1d20 HS |
1744 | return 0; |
1745 | } | |
2540f74b | 1746 | EXPORT_SYMBOL_GPL(dw_dma_disable); |
3bfb1d20 | 1747 | |
2540f74b | 1748 | int dw_dma_enable(struct dw_dma_chip *chip) |
3bfb1d20 | 1749 | { |
9cade1a4 | 1750 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1751 | |
7a83c045 | 1752 | dw_dma_on(dw); |
3bfb1d20 | 1753 | return 0; |
3bfb1d20 | 1754 | } |
2540f74b | 1755 | EXPORT_SYMBOL_GPL(dw_dma_enable); |
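/*
 * Illustration only (not part of the original file): how a glue driver's
 * system PM callbacks would typically wrap the two helpers above. Fetching
 * the chip through drvdata mirrors the probe sketch earlier and is an
 * assumption of this example.
 */
static int example_glue_suspend(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	/* Quiesce the controller; channels are expected to be idle */
	return dw_dma_disable(chip);
}

static int example_glue_resume(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	return dw_dma_enable(chip);
}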
3bfb1d20 HS |
1756 | |
1757 | MODULE_LICENSE("GPL v2"); | |
9cade1a4 | 1758 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); |
e05503ef | 1759 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
da89947b | 1760 | MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); |