/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
#endif
}

static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie = 0;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

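	/*
	 * DMA is usable only if every sg element length is a multiple of the
	 * DMA alignment; an unaligned start offset is tolerated only for a
	 * single-element list that fits in the bounce page.
	 */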
	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

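	/* The PIO receive-ready interrupt is not needed while DMA moves the data */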
	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The sg list's only element may be unaligned - use the bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

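	/* dma_map_sg() returns the number of mapped segments, or 0 on failure */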
	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

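	/*
	 * On any DMA failure both channels are released and the host falls
	 * back to PIO for all subsequent requests.
	 */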
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}

static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie = 0;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The sg list's only element may be unaligned - use the bounce buffer then */
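	/* For a write the data must be staged in the bounce buffer before mapping */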
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

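/*
 * Scheduled to actually start a prepared transfer: re-enables the DATAEND
 * interrupt, then tells the dmaengine to issue the pending descriptor on
 * the active channel.
 */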
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

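/*
 * Completion tasklet: unmaps the scatterlist and finishes the data phase
 * of the request.
 */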
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

/* It might be necessary to make this filter MFD specific */
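/*
 * The filter accepts the first DMA_SLAVE capable channel it is offered and
 * passes the platform-provided slave data via chan->private.
 */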
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!pdata->dma)
		return;

	if (!host->chan_tx && !host->chan_rx) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

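		/*
		 * A single GFP_DMA page suffices: the bounce buffer only ever
		 * holds one sg element of at most PAGE_CACHE_SIZE bytes.
		 */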
		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
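	/*
	 * Clear each channel pointer before releasing the channel so that
	 * concurrent paths checking host->chan_* see NULL and use PIO.
	 */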
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}