/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

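/*
 * Switch DMA on or off through the platform enable callback. DMA is only
 * ever used when both a Tx and an Rx channel have been acquired, so bail
 * out if either one is missing.
 */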
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

	if (host->dma->enable)
		host->dma->enable(host, enable);
}

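/*
 * Terminate all outstanding transactions on both channels. DMA is
 * disabled around the terminate calls and re-enabled afterwards, leaving
 * the controller ready for the next request.
 */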
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	tmio_mmc_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	tmio_mmc_enable_dma(host, true);
}

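/*
 * Set up a device-to-memory transfer for the current request. The
 * scatterlist must respect the controller's alignment constraints: a
 * single unaligned element is redirected through the bounce buffer,
 * while anything more complex (or any element whose length is not a
 * multiple of the required alignment) falls back to PIO.
 */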
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* Only a single sg element may be unaligned; use the bounce buffer for it */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}

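/*
 * Set up a memory-to-device transfer for the current request. Mirrors
 * the Rx path, except that for an unaligned single element the payload
 * has to be copied into the bounce buffer before the transfer is mapped.
 */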
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* Only a single sg element may be unaligned; copy it through the bounce buffer */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

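/*
 * Entry point from the core request path: dispatch to the Rx or Tx
 * helper according to the data direction, but only if the matching
 * channel was successfully acquired at setup time.
 */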
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

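/*
 * Bottom half that actually kicks the prepared transfer: pick the
 * channel matching the current data direction under the host lock,
 * re-enable the DATAEND interrupt, then push the submitted descriptor
 * to the dmaengine driver.
 */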
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

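/*
 * Completion bottom half: unmap the scatterlist for the finished
 * direction and hand the result back to the core data-IRQ handler.
 */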
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

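/*
 * Acquire the Tx and Rx channels (via device tree or the platform filter
 * data), configure their slave addresses and bus widths, and set up the
 * bounce buffer and tasklets. Any failure unwinds to pure PIO operation.
 */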
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!host->dma || (!host->pdev->dev.of_node &&
		(!host->dma->chan_priv_tx || !host->dma->chan_priv_rx)))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_slave_channel_compat(mask,
					host->dma->filter, host->dma->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
		cfg.dst_addr_width = host->dma->dma_buswidth;
		if (!cfg.dst_addr_width)
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;

		host->chan_rx = dma_request_slave_channel_compat(mask,
					host->dma->filter, host->dma->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
		cfg.src_addr_width = host->dma->dma_buswidth;
		if (!cfg.src_addr_width)
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

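/*
 * Undo tmio_mmc_request_dma(): drop both channel references and free the
 * bounce buffer page. The host pointers are cleared before the release
 * calls, so nobody checking them can pick up a channel that is about to
 * go away.
 */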
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}