]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - drivers/mmc/host/renesas_sdhi_sys_dmac.c
1a4016f635d398c28ca9edc5cf202eae6776ffa7
[mirror_ubuntu-eoan-kernel.git] / drivers / mmc / host / renesas_sdhi_sys_dmac.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * DMA support use of SYS DMAC with SDHI SD/SDIO controller
4 *
5 * Copyright (C) 2016-17 Renesas Electronics Corporation
6 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
7 * Copyright (C) 2017 Horms Solutions, Simon Horman
8 * Copyright (C) 2010-2011 Guennadi Liakhovetski
9 */
10
11 #include <linux/device.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmaengine.h>
14 #include <linux/mfd/tmio.h>
15 #include <linux/mmc/host.h>
16 #include <linux/mod_devicetable.h>
17 #include <linux/module.h>
18 #include <linux/of_device.h>
19 #include <linux/pagemap.h>
20 #include <linux/scatterlist.h>
21 #include <linux/sys_soc.h>
22
23 #include "renesas_sdhi.h"
24 #include "tmio_mmc.h"
25
/* Transfers shorter than this (bytes) are done in PIO rather than DMA */
#define TMIO_MMC_MIN_DMA_LEN 8

/* Baseline config for SoCs that only need the idle-wait workaround */
static const struct renesas_sdhi_of_data of_default_cfg = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};

/* RZ family (r7s72100): 32-bit data port, CBSY bit, fixed 3.3V OCR */
static const struct renesas_sdhi_of_data of_rz_compatible = {
	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT |
			  TMIO_MMC_HAVE_CBSY,
	.tmio_ocr_mask	= MMC_VDD_32_33,
	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
};

/* R-Car Gen1 (r8a7778/r8a7779): no write-protect line */
static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = {
	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL,
	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
	.capabilities2	= MMC_CAP2_NO_WRITE_PROTECT,
};
44
/*
 * Definitions for sampling clocks: per-clock-rate SCC tap values.
 * The .clk_rate == 0 entry appears to act as the catch-all default —
 * NOTE(review): confirm the matching rule against renesas_sdhi core.
 */
static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = {
	{
		.clk_rate = 156000000,
		.tap = 0x00000703,
	},
	{
		.clk_rate = 0,
		.tap = 0x00000300,
	},
};

/* R-Car Gen2: 4-byte DMA bus, Rx port at +0x2000, SCC block at +0x0300 */
static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
			  TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			  MMC_CAP_CMD23,
	.capabilities2	= MMC_CAP2_NO_WRITE_PROTECT,
	.dma_buswidth	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dma_rx_offset	= 0x2000,
	.scc_offset	= 0x0300,
	.taps		= rcar_gen2_scc_taps,
	.taps_num	= ARRAY_SIZE(rcar_gen2_scc_taps),
};
69
/* Definitions for sampling clocks: Gen3 uses a single default tap entry */
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
	{
		.clk_rate = 0,
		.tap = 0x00000300,
	},
};

/* r8a7795/r8a7796: like generic Gen3 below, plus the 4-tap HS400 quirk */
static const struct renesas_sdhi_of_data of_rcar_r8a7795_compatible = {
	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
			  TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2 |
			  TMIO_MMC_HAVE_4TAP_HS400,
	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			  MMC_CAP_CMD23,
	.capabilities2	= MMC_CAP2_NO_WRITE_PROTECT,
	.bus_shift	= 2,
	.scc_offset	= 0x1000,
	.taps		= rcar_gen3_scc_taps,
	.taps_num	= ARRAY_SIZE(rcar_gen3_scc_taps),
};

/* R-Car Gen3 generic: registers on a 4-byte stride (bus_shift = 2) */
static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
			  TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			  MMC_CAP_CMD23,
	.capabilities2	= MMC_CAP2_NO_WRITE_PROTECT,
	.bus_shift	= 2,
	.scc_offset	= 0x1000,
	.taps		= rcar_gen3_scc_taps,
	.taps_num	= ARRAY_SIZE(rcar_gen3_scc_taps),
};
102
/*
 * DT match table. Per-SoC entries are listed before the generic family
 * fallbacks so the most specific compatible wins; the bare
 * "renesas,sdhi-shmobile" entry has no per-SoC data attached.
 */
static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
	{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
	{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_r8a7795_compatible, },
	{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_r8a7795_compatible, },
	{ .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
	{ .compatible = "renesas,sdhi-shmobile" },
	{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
126
127 static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
128 bool enable)
129 {
130 struct renesas_sdhi *priv = host_to_priv(host);
131
132 if (!host->chan_tx || !host->chan_rx)
133 return;
134
135 if (priv->dma_priv.enable)
136 priv->dma_priv.enable(host, enable);
137 }
138
139 static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
140 {
141 renesas_sdhi_sys_dmac_enable_dma(host, false);
142
143 if (host->chan_rx)
144 dmaengine_terminate_all(host->chan_rx);
145 if (host->chan_tx)
146 dmaengine_terminate_all(host->chan_tx);
147
148 renesas_sdhi_sys_dmac_enable_dma(host, true);
149 }
150
151 static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
152 {
153 struct renesas_sdhi *priv = host_to_priv(host);
154
155 complete(&priv->dma_priv.dma_dataend);
156 }
157
/*
 * dmaengine completion callback: unmap the scatterlist of the finished
 * transfer, wait for the controller's DATAEND notification (signalled via
 * renesas_sdhi_sys_dmac_dataend_dma()), then complete the data request.
 */
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
	struct tmio_mmc_host *host = arg;
	struct renesas_sdhi *priv = host_to_priv(host);

	spin_lock_irq(&host->lock);

	/* The request may already have been torn down, e.g. by an abort */
	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	/*
	 * Drop the lock while sleeping: dma_dataend is completed from a
	 * different context once the DATAEND interrupt has been handled.
	 */
	spin_unlock_irq(&host->lock);

	wait_for_completion(&priv->dma_priv.dma_dataend);

	spin_lock_irq(&host->lock);
	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
186
/*
 * Set up and submit a device-to-memory DMA transfer for the current
 * scatterlist. Falls back to PIO (releasing BOTH channels) on any DMA
 * setup failure; very short transfers simply return and are done in PIO
 * without giving up the channels.
 */
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
	struct renesas_sdhi *priv = host_to_priv(host);
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	/* Check every sg entry for offset alignment and length multiples */
	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	/*
	 * An unaligned list is only tolerable if it is a single entry that
	 * fits the bounce buffer (one page); otherwise fall back to PIO.
	 */
	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	/* Too short for DMA to be worthwhile: PIO this one, keep channels */
	if (sg->length < TMIO_MMC_MIN_DMA_LEN)
		return;

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM,
					       DMA_CTRL_ACK);

	if (desc) {
		reinit_completion(&priv->dma_priv.dma_dataend);
		desc->callback = renesas_sdhi_sys_dmac_dma_callback;
		desc->callback_param = host;

		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
		host->dma_on = true;
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		renesas_sdhi_sys_dmac_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}
258
/*
 * Set up and submit a memory-to-device DMA transfer for the current
 * scatterlist. Mirrors the Rx path, except an unaligned single entry is
 * first copied into the bounce buffer before mapping. Falls back to PIO
 * (releasing BOTH channels) on any DMA setup failure.
 */
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
	struct renesas_sdhi *priv = host_to_priv(host);
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	/* Check every sg entry for offset alignment and length multiples */
	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	/*
	 * An unaligned list is only tolerable if it is a single entry that
	 * fits the bounce buffer (one page); otherwise fall back to PIO.
	 */
	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	/* Too short for DMA to be worthwhile: PIO this one, keep channels */
	if (sg->length < TMIO_MMC_MIN_DMA_LEN)
		return;

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

		/* Stage the payload in the bounce buffer before mapping */
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV,
					       DMA_CTRL_ACK);

	if (desc) {
		reinit_completion(&priv->dma_priv.dma_dataend);
		desc->callback = renesas_sdhi_sys_dmac_dma_callback;
		desc->callback_param = host;

		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
		host->dma_on = true;
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		renesas_sdhi_sys_dmac_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}
335
336 static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host,
337 struct mmc_data *data)
338 {
339 if (data->flags & MMC_DATA_READ) {
340 if (host->chan_rx)
341 renesas_sdhi_sys_dmac_start_dma_rx(host);
342 } else {
343 if (host->chan_tx)
344 renesas_sdhi_sys_dmac_start_dma_tx(host);
345 }
346 }
347
348 static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
349 {
350 struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
351 struct dma_chan *chan = NULL;
352
353 spin_lock_irq(&host->lock);
354
355 if (host->data) {
356 if (host->data->flags & MMC_DATA_READ)
357 chan = host->chan_rx;
358 else
359 chan = host->chan_tx;
360 }
361
362 spin_unlock_irq(&host->lock);
363
364 tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
365
366 if (chan)
367 dma_async_issue_pending(chan);
368 }
369
/*
 * Acquire and configure the Tx and Rx DMA channels, allocate the one-page
 * bounce buffer, and set up the completion/tasklet machinery. On any
 * failure everything acquired so far is released via the goto-cleanup
 * chain and the host silently stays in PIO mode.
 */
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
					      struct tmio_mmc_data *pdata)
{
	struct renesas_sdhi *priv = host_to_priv(host);

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!host->pdev->dev.of_node &&
	    (!pdata->chan_priv_tx || !pdata->chan_priv_rx))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_slave_channel_compat(mask,
					priv->dma_priv.filter, pdata->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		/* Tx target is the SD data port, scaled by the bus shift */
		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start +
			(CTL_SD_DATA_PORT << host->bus_shift);
		cfg.dst_addr_width = priv->dma_priv.dma_buswidth;
		if (!cfg.dst_addr_width)
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;

		host->chan_rx = dma_request_slave_channel_compat(mask,
					priv->dma_priv.filter, pdata->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		/* Rx source is the data port shifted by the SoC Rx offset */
		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
		cfg.src_addr_width = priv->dma_priv.dma_buswidth;
		if (!cfg.src_addr_width)
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;

		/* One DMA-able page for bouncing unaligned single-entry sgs */
		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		init_completion(&priv->dma_priv.dma_dataend);
		tasklet_init(&host->dma_issue,
			     renesas_sdhi_sys_dmac_issue_tasklet_fn,
			     (unsigned long)host);
	}

	renesas_sdhi_sys_dmac_enable_dma(host, true);

	return;

ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
455
456 static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host)
457 {
458 if (host->chan_tx) {
459 struct dma_chan *chan = host->chan_tx;
460
461 host->chan_tx = NULL;
462 dma_release_channel(chan);
463 }
464 if (host->chan_rx) {
465 struct dma_chan *chan = host->chan_rx;
466
467 host->chan_rx = NULL;
468 dma_release_channel(chan);
469 }
470 if (host->bounce_buf) {
471 free_pages((unsigned long)host->bounce_buf, 0);
472 host->bounce_buf = NULL;
473 }
474 }
475
/* DMA operations handed to the shared renesas_sdhi core at probe time */
static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
	.start = renesas_sdhi_sys_dmac_start_dma,
	.enable = renesas_sdhi_sys_dmac_enable_dma,
	.request = renesas_sdhi_sys_dmac_request_dma,
	.release = renesas_sdhi_sys_dmac_release_dma,
	.abort = renesas_sdhi_sys_dmac_abort_dma,
	.dataend = renesas_sdhi_sys_dmac_dataend_dma,
};
484
/*
 * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC
 * implementation. Currently empty as all supported ES versions use
 * the internal DMAC: soc_device_match() on an empty list never matches,
 * so probe() below refuses to bind on any Gen3 part.
 */
static const struct soc_device_attribute gen3_soc_whitelist[] = {
	{ /* sentinel */ }
};
493
494 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
495 {
496 if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
497 of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
498 !soc_device_match(gen3_soc_whitelist))
499 return -ENODEV;
500
501 return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops);
502 }
503
/* System sleep goes through runtime-PM force helpers; runtime PM uses
 * the shared tmio_mmc host suspend/resume callbacks.
 */
static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
			   tmio_mmc_host_runtime_resume,
			   NULL)
};
511
/*
 * Platform driver. The historical name "sh_mobile_sdhi" is kept (see the
 * MODULE_ALIAS below) so existing platform devices keep binding.
 */
static struct platform_driver renesas_sys_dmac_sdhi_driver = {
	.driver		= {
		.name	= "sh_mobile_sdhi",
		.pm	= &renesas_sdhi_sys_dmac_dev_pm_ops,
		.of_match_table = renesas_sdhi_sys_dmac_of_match,
	},
	.probe		= renesas_sdhi_sys_dmac_probe,
	.remove		= renesas_sdhi_remove,
};

module_platform_driver(renesas_sys_dmac_sdhi_driver);

MODULE_DESCRIPTION("Renesas SDHI driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sh_mobile_sdhi");