/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2016 Sang Engineering, Wolfram Sang
 * Copyright (C) 2015-16 Renesas Electronics Corporation
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver
 * (4 bit support), with further 4 bit support from a later datasheet.
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}

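/* Scatter-gather bookkeeping for PIO transfers: walk data->sg one entry at a time */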
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

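/* Command request watchdog timeout in milliseconds; see tmio_mmc_reset_work() */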
#define CMDREQ_TIMEOUT	5000

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;

	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s) do { } while (0)
#endif

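/*
 * While the SDIO card interrupt is enabled we hold a runtime PM reference,
 * so the controller cannot be powered down underneath the card; the
 * reference is dropped again when the interrupt is disabled.
 */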
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		u16 sdio_status;

		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));

		host->sdio_irq_enabled = true;
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;

		/* Clear obsolete interrupts before enabling */
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
		if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
			sdio_status |= TMIO_SDIO_SETBITS_MASK;
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}

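/*
 * SD clock gating. SCLKEN gates the clock proper; controllers with the
 * "high" register set (TMIO_MMC_HAVE_HIGH_REG) additionally toggle
 * CLK_AND_WAIT_CTL. The delays follow the original driver; R-Car Gen2+
 * parts (TMIO_MMC_MIN_RCAR2) get away with shorter ones.
 */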
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 5 : 10);
}

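/*
 * The divider is encoded one-hot in the low byte of CTL_SD_CARD_CLK_CTL:
 * 0x80 selects the largest divisor (base clock / 512) and each right
 * shift of the bit halves the divisor. Bit 22 of the computed value flags
 * the undivided 1/1 clock, which (on TMIO_MMC_CLK_ACTUAL hardware) is
 * requested by setting the low byte to 0xff.
 */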
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
				unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock == 0) {
		tmio_mmc_clk_stop(host);
		return;
	}

	if (host->clk_update)
		clock = host->clk_update(host, new_clock) / 512;
	else
		clock = host->mmc->f_min;

	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
		clock <<= 1;

	/* 1/1 clock is an option */
	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
		clk |= 0xff;

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		msleep(10);

	tmio_mmc_clk_start(host);
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set the stop clock reg here? */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}

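/*
 * Watchdog for requests that never see a completion interrupt: if the
 * hardware has been silent for CMDREQ_TIMEOUT ms, fail the request with
 * -ETIMEDOUT, reset the controller and abort any DMA in flight.
 */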
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}

/* Takes host->lock itself, so it must not be called with the lock held */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	if (host->check_scc_error)
		host->check_scc_error(host);

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

/*
 * These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme.
 */
#define APP_CMD		0x0040
#define RESP_NONE	0x0300
#define RESP_R1		0x0400
#define RESP_R1B	0x0500
#define RESP_R2		0x0600
#define RESP_R3		0x0700
#define DATA_PRESENT	0x0800
#define TRANSFER_READ	0x1000
#define TRANSFER_MULTI	0x2000
#define SECURITY_CMD	0x4000
#define NO_CMD12_ISSUE	0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */

static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	/* CMD12 is handled by hardware */
	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/*
 * FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED for
			 * multiple block transfers
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

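/*
 * PIO data mover: whole 16-bit (or, with TMIO_MMC_32BIT_DATA_PORT, 32-bit)
 * words go through the rep accessors; a trailing partial word is staged
 * through a small bounce array or a single 16-bit access, assuming
 * little-endian byte order (see the FIXME below).
 */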
static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8 *buf8;

	/*
	 * Transfer the data
	 */
	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
		u8 data[4] = { };

		if (is_read)
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
					   count >> 2);
		else
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
					    count >> 2);

		/* if count was a multiple of 4 */
		if (!(count & 0x3))
			return;

		buf8 = (u8 *)(buf + (count >> 2));
		count %= 4;

		if (is_read) {
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
					   (u32 *)data, 1);
			memcpy(buf8, data, count);
		} else {
			memcpy(data, buf8, count);
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
					    (u32 *)data, 1);
		}

		return;
	}

	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was even */
	if (!(count & 0x1))
		return;

	/* if count was odd */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * The driver and this function assume little-endian byte order.
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}

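/* Copy a completed bounce-buffer read back into the caller's original sg entry */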
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* Needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return the correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
	    stat & TMIO_STAT_TXUNDERRUN)
		data->error = -EILSEQ;
	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with
		 * the BUSY status bit clear, but on some operations, like
		 * mount or at the beginning of a write / sync / umount, there
		 * is one DATAEND interrupt with the BUSY bit set. In these
		 * cases, waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
			     unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	/*
	 * This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
		 stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_CMD_IDX_ERR)
		cmd->error = -EILSEQ;

	/*
	 * If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				  int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			TMIO_STAT_CMDRESPEND |
			TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host, status);
		return true;
	}

	return false;
}

static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
		sdio_status |= TMIO_SDIO_SETBITS_MASK;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);
}

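/*
 * Top-level interrupt handler: after acking the status bits, dispatch to
 * the card-detect, SD-card (command/PIO/data-end) and SDIO sub-handlers in
 * that order; the first two consume the interrupt, SDIO is checked last.
 */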
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(status);
	pr_debug_status(ireg);

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	__tmio_mmc_sdio_irq(host);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

static void tmio_mmc_hw_reset(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hw_reset)
		host->hw_reset(host);
}

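/*
 * Tuning: the tuning command is issued twice for each tap, so the taps
 * bitmap holds 2 * tap_num results; the platform-provided select_tuning()
 * hook then picks the best tap from the successful samples.
 */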
static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int i, ret = 0;

	if (!host->tap_num) {
		if (!host->init_tuning || !host->select_tuning)
			/* Tuning is not supported */
			goto out;

		host->tap_num = host->init_tuning(host);
		if (!host->tap_num)
			/* Tuning is not supported */
			goto out;
	}

	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
		dev_warn_once(&host->pdev->dev,
			"Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
		goto out;
	}

	bitmap_zero(host->taps, host->tap_num * 2);

	/* Issue CMD19 twice for each tap */
	for (i = 0; i < 2 * host->tap_num; i++) {
		if (host->prepare_tuning)
			host->prepare_tuning(host, i % host->tap_num);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (ret && ret != -EILSEQ)
			goto out;
		if (ret == 0)
			set_bit(i, host->taps);

		mdelay(1);
	}

	ret = host->select_tuning(host);

out:
	if (ret < 0) {
		dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
		tmio_mmc_hw_reset(mmc);
	}

	return ret;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(CMDREQ_TIMEOUT));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}

static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
	if (!host->clk_enable)
		return -ENOTSUPP;

	return host->clk_enable(host);
}

static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
{
	if (host->clk_disable)
		host->clk_disable(host);
}

static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() returns void, so there is no chance to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empiric value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		udelay(200);
	}
	/*
	 * It seems VccQ should be switched on after Vcc; this is also what the
	 * omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
{
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* reg now applies to MMC_BUS_WIDTH_4 */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}

/*
 * Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, so it cannot run an
 * MMC card at full speed (20 MHz). The max clock is 24 MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12 MHz, which is the
 * next slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. Delay taken from the winCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_ro(mmc);

	if (ret >= 0)
		return ret;

	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));

	return ret;
}

static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
	.hw_reset	= tmio_mmc_hw_reset,
	.execute_tuning = tmio_mmc_execute_tuning,
};

static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	/* Use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * Try again: there is a possibility that the regulator
	 * has not been probed yet.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}

static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct tmio_mmc_data *pdata)
{
	const struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}

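/*
 * Host lifecycle helpers. A platform glue driver is expected to pair them
 * roughly like this (a sketch, error handling abbreviated):
 *
 *	host = tmio_mmc_host_alloc(pdev);
 *	if (!host)
 *		return -ENOMEM;
 *	ret = tmio_mmc_host_probe(host, pdata);
 *	if (ret < 0)
 *		tmio_mmc_host_free(host);
 *	...
 *	tmio_mmc_host_remove(host);
 *	tmio_mmc_host_free(host);
 */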
struct tmio_mmc_host*
tmio_mmc_host_alloc(struct platform_device *pdev)
{
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return NULL;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;

	return host;
}
EXPORT_SYMBOL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_free);

int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
			struct tmio_mmc_data *pdata)
{
	struct platform_device *pdev = _host->pdev;
	struct mmc_host *mmc = _host->mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	tmio_mmc_of_parse(pdev, pdata);

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		return ret;

	_host->pdata = pdata;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		return ret;

	_host->ctl = devm_ioremap(&pdev->dev,
				  res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl)
		return -ENOMEM;

	tmio_mmc_ops.card_busy = _host->card_busy;
	tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
	mmc->ops = &tmio_mmc_ops;

	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  !mmc_card_is_removable(mmc));

	/*
	 * On Gen2+, eMMC with NONREMOVABLE currently fails because native
	 * hotplug gets disabled. It seems RuntimePM-related, yet we need
	 * further research. Since we are planning a PM overhaul anyway,
	 * let's enforce for now the device being active by enabling native
	 * hotplug always.
	 */
	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
		_host->native_hotplug = true;

	if (tmio_mmc_clk_enable(_host) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;
	}

	/*
	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0)
		return -EINVAL;

	/*
	 * While using internal tmio hardware logic for card detection, we need
	 * to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	_host->sdcard_irq_mask &= ~irq_mask;

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
		sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0001);
	}

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret < 0) {
		tmio_mmc_host_remove(_host);
		return ret;
	}

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
		if (ret < 0) {
			tmio_mmc_host_remove(_host);
			return ret;
		}
		mmc_gpiod_request_cd_irq(mmc);
	}

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);

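/*
 * Teardown mirrors probe in reverse: quiesce SDIO, keep the device powered
 * while unregistering, flush the workers, release DMA and finally drop the
 * runtime PM references and the clock.
 */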
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);

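/*
 * Runtime PM: suspend masks all SD-card interrupts and gates the clock;
 * resume resets the controller, restores the cached clock rate, re-enables
 * DMA and re-selects the tuning tap where retuning is possible.
 */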
#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	tmio_mmc_clk_disable(host);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
{
	return host->tap_num && mmc_can_retune(host->mmc);
}

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_reset(host);
	tmio_mmc_clk_enable(host);

	if (host->clk_cache)
		tmio_mmc_set_clock(host, host->clk_cache);

	tmio_mmc_enable_dma(host, true);

	if (tmio_mmc_can_retune(host) && host->select_tuning(host))
		dev_warn(&host->pdev->dev, "Tuning selection failed\n");

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");