]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/mmc/host/dw_mmc.c
mmc: block: fix updating ext_csd caches on ioctl call
[mirror_ubuntu-bionic-kernel.git] / drivers / mmc / host / dw_mmc.c
CommitLineData
f95f3850
WN
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
b6d2d81c 22#include <linux/iopoll.h>
f95f3850
WN
23#include <linux/ioport.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
a6db2c86 26#include <linux/pm_runtime.h>
f95f3850
WN
27#include <linux/seq_file.h>
28#include <linux/slab.h>
29#include <linux/stat.h>
30#include <linux/delay.h>
31#include <linux/irq.h>
b24c8b26 32#include <linux/mmc/card.h>
f95f3850
WN
33#include <linux/mmc/host.h>
34#include <linux/mmc/mmc.h>
01730558 35#include <linux/mmc/sd.h>
90c2143a 36#include <linux/mmc/sdio.h>
f95f3850 37#include <linux/bitops.h>
c07946a3 38#include <linux/regulator/consumer.h>
c91eab4b 39#include <linux/of.h>
55a6ceb2 40#include <linux/of_gpio.h>
bf626e55 41#include <linux/mmc/slot-gpio.h>
f95f3850
WN
42
43#include "dw_mmc.h"
44
45/* Common flag combinations */
3f7eec62 46#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
f95f3850 47 SDMMC_INT_HTO | SDMMC_INT_SBE | \
7a3c5677 48 SDMMC_INT_EBE | SDMMC_INT_HLE)
f95f3850 49#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
7a3c5677 50 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
f95f3850 51#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
7a3c5677 52 DW_MCI_CMD_ERROR_FLAGS)
f95f3850
WN
53#define DW_MCI_SEND_STATUS 1
54#define DW_MCI_RECV_STATUS 2
55#define DW_MCI_DMA_THRESHOLD 16
56
1f44a2a5 57#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
72e83577 58#define DW_MCI_FREQ_MIN 100000 /* unit: HZ */
1f44a2a5 59
fc79a4d6
JS
60#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
61 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
62 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
63 SDMMC_IDMAC_INT_TI)
64
cc190d4c
SL
65#define DESC_RING_BUF_SZ PAGE_SIZE
66
69d99fdc
PT
67struct idmac_desc_64addr {
68 u32 des0; /* Control Descriptor */
b6d2d81c
SL
69#define IDMAC_OWN_CLR64(x) \
70 !((x) & cpu_to_le32(IDMAC_DES0_OWN))
69d99fdc
PT
71
72 u32 des1; /* Reserved */
73
74 u32 des2; /*Buffer sizes */
75#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
6687c42f
BD
76 ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
77 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
69d99fdc
PT
78
79 u32 des3; /* Reserved */
80
81 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/
82 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/
83
84 u32 des6; /* Lower 32-bits of Next Descriptor Address */
85 u32 des7; /* Upper 32-bits of Next Descriptor Address */
86};
87
f95f3850 88struct idmac_desc {
6687c42f 89 __le32 des0; /* Control Descriptor */
f95f3850
WN
90#define IDMAC_DES0_DIC BIT(1)
91#define IDMAC_DES0_LD BIT(2)
92#define IDMAC_DES0_FD BIT(3)
93#define IDMAC_DES0_CH BIT(4)
94#define IDMAC_DES0_ER BIT(5)
95#define IDMAC_DES0_CES BIT(30)
96#define IDMAC_DES0_OWN BIT(31)
97
6687c42f 98 __le32 des1; /* Buffer sizes */
f95f3850 99#define IDMAC_SET_BUFFER1_SIZE(d, s) \
e5306c3a 100 ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
f95f3850 101
6687c42f 102 __le32 des2; /* buffer 1 physical address */
f95f3850 103
6687c42f 104 __le32 des3; /* buffer 2 physical address */
f95f3850 105};
5959b32e
AB
106
107/* Each descriptor can transfer up to 4KB of data in chained mode */
108#define DW_MCI_DESC_DATA_LENGTH 0x1000
f95f3850 109
f95f3850
WN
110#if defined(CONFIG_DEBUG_FS)
111static int dw_mci_req_show(struct seq_file *s, void *v)
112{
113 struct dw_mci_slot *slot = s->private;
114 struct mmc_request *mrq;
115 struct mmc_command *cmd;
116 struct mmc_command *stop;
117 struct mmc_data *data;
118
119 /* Make sure we get a consistent snapshot */
120 spin_lock_bh(&slot->host->lock);
121 mrq = slot->mrq;
122
123 if (mrq) {
124 cmd = mrq->cmd;
125 data = mrq->data;
126 stop = mrq->stop;
127
128 if (cmd)
129 seq_printf(s,
130 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
131 cmd->opcode, cmd->arg, cmd->flags,
132 cmd->resp[0], cmd->resp[1], cmd->resp[2],
133 cmd->resp[2], cmd->error);
134 if (data)
135 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
136 data->bytes_xfered, data->blocks,
137 data->blksz, data->flags, data->error);
138 if (stop)
139 seq_printf(s,
140 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
141 stop->opcode, stop->arg, stop->flags,
142 stop->resp[0], stop->resp[1], stop->resp[2],
143 stop->resp[2], stop->error);
144 }
145
146 spin_unlock_bh(&slot->host->lock);
147
148 return 0;
149}
150
151static int dw_mci_req_open(struct inode *inode, struct file *file)
152{
153 return single_open(file, dw_mci_req_show, inode->i_private);
154}
155
156static const struct file_operations dw_mci_req_fops = {
157 .owner = THIS_MODULE,
158 .open = dw_mci_req_open,
159 .read = seq_read,
160 .llseek = seq_lseek,
161 .release = single_release,
162};
163
164static int dw_mci_regs_show(struct seq_file *s, void *v)
165{
21657ebd
JC
166 struct dw_mci *host = s->private;
167
e56b4c64
SL
168 pm_runtime_get_sync(host->dev);
169
21657ebd
JC
170 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
171 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
172 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
173 seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
174 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
175 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
f95f3850 176
e56b4c64
SL
177 pm_runtime_put_autosuspend(host->dev);
178
f95f3850
WN
179 return 0;
180}
181
182static int dw_mci_regs_open(struct inode *inode, struct file *file)
183{
184 return single_open(file, dw_mci_regs_show, inode->i_private);
185}
186
187static const struct file_operations dw_mci_regs_fops = {
188 .owner = THIS_MODULE,
189 .open = dw_mci_regs_open,
190 .read = seq_read,
191 .llseek = seq_lseek,
192 .release = single_release,
193};
194
195static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
196{
197 struct mmc_host *mmc = slot->mmc;
198 struct dw_mci *host = slot->host;
199 struct dentry *root;
200 struct dentry *node;
201
202 root = mmc->debugfs_root;
203 if (!root)
204 return;
205
206 node = debugfs_create_file("regs", S_IRUSR, root, host,
207 &dw_mci_regs_fops);
208 if (!node)
209 goto err;
210
211 node = debugfs_create_file("req", S_IRUSR, root, slot,
212 &dw_mci_req_fops);
213 if (!node)
214 goto err;
215
216 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
217 if (!node)
218 goto err;
219
220 node = debugfs_create_x32("pending_events", S_IRUSR, root,
221 (u32 *)&host->pending_events);
222 if (!node)
223 goto err;
224
225 node = debugfs_create_x32("completed_events", S_IRUSR, root,
226 (u32 *)&host->completed_events);
227 if (!node)
228 goto err;
229
230 return;
231
232err:
233 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
234}
235#endif /* defined(CONFIG_DEBUG_FS) */
236
8e6db1f6
SL
237static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
238{
239 u32 ctrl;
240
241 ctrl = mci_readl(host, CTRL);
242 ctrl |= reset;
243 mci_writel(host, CTRL, ctrl);
244
245 /* wait till resets clear */
246 if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
247 !(ctrl & reset),
248 1, 500 * USEC_PER_MSEC)) {
249 dev_err(host->dev,
250 "Timeout resetting block (ctrl reset %#x)\n",
251 ctrl & reset);
252 return false;
253 }
254
255 return true;
256}
01730558 257
4dba18de
SL
258static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
259{
260 u32 status;
261
262 /*
263 * Databook says that before issuing a new data transfer command
264 * we need to check to see if the card is busy. Data transfer commands
265 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
266 *
267 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
268 * expected.
269 */
270 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
271 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
272 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
273 status,
274 !(status & SDMMC_STATUS_BUSY),
275 10, 500 * USEC_PER_MSEC))
276 dev_err(host->dev, "Busy; trying anyway\n");
277 }
278}
279
280static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
281{
282 struct dw_mci *host = slot->host;
283 unsigned int cmd_status = 0;
284
285 mci_writel(host, CMDARG, arg);
286 wmb(); /* drain writebuffer */
287 dw_mci_wait_while_busy(host, cmd);
288 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
289
290 if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
291 !(cmd_status & SDMMC_CMD_START),
292 1, 500 * USEC_PER_MSEC))
293 dev_err(&slot->mmc->class_dev,
294 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
295 cmd, arg, cmd_status);
296}
297
f95f3850
WN
298static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
299{
800d78bf 300 struct dw_mci_slot *slot = mmc_priv(mmc);
01730558 301 struct dw_mci *host = slot->host;
f95f3850 302 u32 cmdr;
f95f3850 303
0e3a22c0 304 cmd->error = -EINPROGRESS;
f95f3850
WN
305 cmdr = cmd->opcode;
306
90c2143a
SJ
307 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
308 cmd->opcode == MMC_GO_IDLE_STATE ||
309 cmd->opcode == MMC_GO_INACTIVE_STATE ||
310 (cmd->opcode == SD_IO_RW_DIRECT &&
311 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
f95f3850 312 cmdr |= SDMMC_CMD_STOP;
4a1b27ad
JC
313 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
314 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
f95f3850 315
01730558
DA
316 if (cmd->opcode == SD_SWITCH_VOLTAGE) {
317 u32 clk_en_a;
318
319 /* Special bit makes CMD11 not die */
320 cmdr |= SDMMC_CMD_VOLT_SWITCH;
321
322 /* Change state to continue to handle CMD11 weirdness */
323 WARN_ON(slot->host->state != STATE_SENDING_CMD);
324 slot->host->state = STATE_SENDING_CMD11;
325
326 /*
327 * We need to disable low power mode (automatic clock stop)
328 * while doing voltage switch so we don't confuse the card,
329 * since stopping the clock is a specific part of the UHS
330 * voltage change dance.
331 *
332 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
333 * unconditionally turned back on in dw_mci_setup_bus() if it's
334 * ever called with a non-zero clock. That shouldn't happen
335 * until the voltage change is all done.
336 */
337 clk_en_a = mci_readl(host, CLKENA);
338 clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
339 mci_writel(host, CLKENA, clk_en_a);
340 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
341 SDMMC_CMD_PRV_DAT_WAIT, 0);
342 }
343
f95f3850
WN
344 if (cmd->flags & MMC_RSP_PRESENT) {
345 /* We expect a response, so set this bit */
346 cmdr |= SDMMC_CMD_RESP_EXP;
347 if (cmd->flags & MMC_RSP_136)
348 cmdr |= SDMMC_CMD_RESP_LONG;
349 }
350
351 if (cmd->flags & MMC_RSP_CRC)
352 cmdr |= SDMMC_CMD_RESP_CRC;
353
0349c085 354 if (cmd->data) {
f95f3850 355 cmdr |= SDMMC_CMD_DAT_EXP;
0349c085 356 if (cmd->data->flags & MMC_DATA_WRITE)
f95f3850
WN
357 cmdr |= SDMMC_CMD_DAT_WR;
358 }
359
aaaaeb7a
JC
360 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
361 cmdr |= SDMMC_CMD_USE_HOLD_REG;
800d78bf 362
f95f3850
WN
363 return cmdr;
364}
365
90c2143a
SJ
366static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
367{
368 struct mmc_command *stop;
369 u32 cmdr;
370
371 if (!cmd->data)
372 return 0;
373
374 stop = &host->stop_abort;
375 cmdr = cmd->opcode;
376 memset(stop, 0, sizeof(struct mmc_command));
377
378 if (cmdr == MMC_READ_SINGLE_BLOCK ||
379 cmdr == MMC_READ_MULTIPLE_BLOCK ||
380 cmdr == MMC_WRITE_BLOCK ||
6c2c6506
UH
381 cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
382 cmdr == MMC_SEND_TUNING_BLOCK ||
383 cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
90c2143a
SJ
384 stop->opcode = MMC_STOP_TRANSMISSION;
385 stop->arg = 0;
386 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
387 } else if (cmdr == SD_IO_RW_EXTENDED) {
388 stop->opcode = SD_IO_RW_DIRECT;
389 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
390 ((cmd->arg >> 28) & 0x7);
391 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
392 } else {
393 return 0;
394 }
395
396 cmdr = stop->opcode | SDMMC_CMD_STOP |
397 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
398
42f989c0 399 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
8c005b40
JC
400 cmdr |= SDMMC_CMD_USE_HOLD_REG;
401
90c2143a
SJ
402 return cmdr;
403}
404
03de1921
AK
405static inline void dw_mci_set_cto(struct dw_mci *host)
406{
407 unsigned int cto_clks;
4c2357f5 408 unsigned int cto_div;
03de1921 409 unsigned int cto_ms;
8892b705 410 unsigned long irqflags;
03de1921
AK
411
412 cto_clks = mci_readl(host, TMOUT) & 0xff;
4c2357f5
DA
413 cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
414 if (cto_div == 0)
415 cto_div = 1;
416 cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz);
03de1921
AK
417
418 /* add a bit spare time */
419 cto_ms += 10;
420
8892b705
DA
421 /*
422 * The durations we're working with are fairly short so we have to be
423 * extra careful about synchronization here. Specifically in hardware a
424 * command timeout is _at most_ 5.1 ms, so that means we expect an
425 * interrupt (either command done or timeout) to come rather quickly
426 * after the mci_writel. ...but just in case we have a long interrupt
427 * latency let's add a bit of paranoia.
428 *
429 * In general we'll assume that at least an interrupt will be asserted
430 * in hardware by the time the cto_timer runs. ...and if it hasn't
431 * been asserted in hardware by that time then we'll assume it'll never
432 * come.
433 */
434 spin_lock_irqsave(&host->irq_lock, irqflags);
435 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
436 mod_timer(&host->cto_timer,
437 jiffies + msecs_to_jiffies(cto_ms) + 1);
438 spin_unlock_irqrestore(&host->irq_lock, irqflags);
03de1921
AK
439}
440
f95f3850
WN
441static void dw_mci_start_command(struct dw_mci *host,
442 struct mmc_command *cmd, u32 cmd_flags)
443{
444 host->cmd = cmd;
4a90920c 445 dev_vdbg(host->dev,
f95f3850
WN
446 "start command: ARGR=0x%08x CMDR=0x%08x\n",
447 cmd->arg, cmd_flags);
448
449 mci_writel(host, CMDARG, cmd->arg);
0e3a22c0 450 wmb(); /* drain writebuffer */
0bdbd0e8 451 dw_mci_wait_while_busy(host, cmd_flags);
f95f3850 452
8892b705
DA
453 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
454
03de1921
AK
455 /* response expected command only */
456 if (cmd_flags & SDMMC_CMD_RESP_EXP)
457 dw_mci_set_cto(host);
f95f3850
WN
458}
459
90c2143a 460static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
f95f3850 461{
e13c3c08 462 struct mmc_command *stop = &host->stop_abort;
0e3a22c0 463
90c2143a 464 dw_mci_start_command(host, stop, host->stop_cmdr);
f95f3850
WN
465}
466
467/* DMA interface functions */
468static void dw_mci_stop_dma(struct dw_mci *host)
469{
03e8cb53 470 if (host->using_dma) {
f95f3850
WN
471 host->dma_ops->stop(host);
472 host->dma_ops->cleanup(host);
f95f3850 473 }
aa50f259
SJ
474
475 /* Data transfer was stopped by the interrupt handler */
476 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
f95f3850
WN
477}
478
f95f3850
WN
479static void dw_mci_dma_cleanup(struct dw_mci *host)
480{
481 struct mmc_data *data = host->data;
482
a4cc7eb4
JC
483 if (data && data->host_cookie == COOKIE_MAPPED) {
484 dma_unmap_sg(host->dev,
485 data->sg,
486 data->sg_len,
feeef096 487 mmc_get_dma_dir(data));
a4cc7eb4
JC
488 data->host_cookie = COOKIE_UNMAPPED;
489 }
f95f3850
WN
490}
491
5ce9d961
SJ
492static void dw_mci_idmac_reset(struct dw_mci *host)
493{
494 u32 bmod = mci_readl(host, BMOD);
495 /* Software reset of DMA */
496 bmod |= SDMMC_IDMAC_SWRESET;
497 mci_writel(host, BMOD, bmod);
498}
499
f95f3850
WN
500static void dw_mci_idmac_stop_dma(struct dw_mci *host)
501{
502 u32 temp;
503
504 /* Disable and reset the IDMAC interface */
505 temp = mci_readl(host, CTRL);
506 temp &= ~SDMMC_CTRL_USE_IDMAC;
507 temp |= SDMMC_CTRL_DMA_RESET;
508 mci_writel(host, CTRL, temp);
509
510 /* Stop the IDMAC running */
511 temp = mci_readl(host, BMOD);
a5289a43 512 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
5ce9d961 513 temp |= SDMMC_IDMAC_SWRESET;
f95f3850
WN
514 mci_writel(host, BMOD, temp);
515}
516
3fc7eaef 517static void dw_mci_dmac_complete_dma(void *arg)
f95f3850 518{
3fc7eaef 519 struct dw_mci *host = arg;
f95f3850
WN
520 struct mmc_data *data = host->data;
521
4a90920c 522 dev_vdbg(host->dev, "DMA complete\n");
f95f3850 523
3fc7eaef
SL
524 if ((host->use_dma == TRANS_MODE_EDMAC) &&
525 data && (data->flags & MMC_DATA_READ))
526 /* Invalidate cache after read */
42f989c0 527 dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
3fc7eaef
SL
528 data->sg,
529 data->sg_len,
530 DMA_FROM_DEVICE);
531
f95f3850
WN
532 host->dma_ops->cleanup(host);
533
534 /*
535 * If the card was removed, data will be NULL. No point in trying to
536 * send the stop command or waiting for NBUSY in this case.
537 */
538 if (data) {
539 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
540 tasklet_schedule(&host->tasklet);
541 }
542}
543
3b2a067b
SL
544static int dw_mci_idmac_init(struct dw_mci *host)
545{
546 int i;
547
548 if (host->dma_64bit_address == 1) {
549 struct idmac_desc_64addr *p;
550 /* Number of descriptors in the ring buffer */
cc190d4c
SL
551 host->ring_size =
552 DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
3b2a067b
SL
553
554 /* Forward link the descriptor list */
555 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
556 i++, p++) {
557 p->des6 = (host->sg_dma +
558 (sizeof(struct idmac_desc_64addr) *
559 (i + 1))) & 0xffffffff;
560
561 p->des7 = (u64)(host->sg_dma +
562 (sizeof(struct idmac_desc_64addr) *
563 (i + 1))) >> 32;
564 /* Initialize reserved and buffer size fields to "0" */
565 p->des1 = 0;
566 p->des2 = 0;
567 p->des3 = 0;
568 }
569
570 /* Set the last descriptor as the end-of-ring descriptor */
571 p->des6 = host->sg_dma & 0xffffffff;
572 p->des7 = (u64)host->sg_dma >> 32;
573 p->des0 = IDMAC_DES0_ER;
574
575 } else {
576 struct idmac_desc *p;
577 /* Number of descriptors in the ring buffer */
cc190d4c
SL
578 host->ring_size =
579 DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
3b2a067b
SL
580
581 /* Forward link the descriptor list */
582 for (i = 0, p = host->sg_cpu;
583 i < host->ring_size - 1;
584 i++, p++) {
585 p->des3 = cpu_to_le32(host->sg_dma +
586 (sizeof(struct idmac_desc) * (i + 1)));
587 p->des1 = 0;
588 }
589
590 /* Set the last descriptor as the end-of-ring descriptor */
591 p->des3 = cpu_to_le32(host->sg_dma);
592 p->des0 = cpu_to_le32(IDMAC_DES0_ER);
593 }
594
595 dw_mci_idmac_reset(host);
596
597 if (host->dma_64bit_address == 1) {
598 /* Mask out interrupts - get Tx & Rx complete only */
599 mci_writel(host, IDSTS64, IDMAC_INT_CLR);
600 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
601 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
602
603 /* Set the descriptor base address */
604 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
605 mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
606
607 } else {
608 /* Mask out interrupts - get Tx & Rx complete only */
609 mci_writel(host, IDSTS, IDMAC_INT_CLR);
610 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
611 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
612
613 /* Set the descriptor base address */
614 mci_writel(host, DBADDR, host->sg_dma);
615 }
616
617 return 0;
618}
619
620static inline int dw_mci_prepare_desc64(struct dw_mci *host,
ec0baaa6
SL
621 struct mmc_data *data,
622 unsigned int sg_len)
f95f3850 623{
5959b32e 624 unsigned int desc_len;
ec0baaa6 625 struct idmac_desc_64addr *desc_first, *desc_last, *desc;
b6d2d81c 626 u32 val;
f95f3850 627 int i;
0e3a22c0 628
ec0baaa6 629 desc_first = desc_last = desc = host->sg_cpu;
5959b32e 630
ec0baaa6
SL
631 for (i = 0; i < sg_len; i++) {
632 unsigned int length = sg_dma_len(&data->sg[i]);
69d99fdc 633
ec0baaa6 634 u64 mem_addr = sg_dma_address(&data->sg[i]);
0e3a22c0 635
ec0baaa6
SL
636 for ( ; length ; desc++) {
637 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
638 length : DW_MCI_DESC_DATA_LENGTH;
f95f3850 639
ec0baaa6 640 length -= desc_len;
5959b32e 641
3b2a067b
SL
642 /*
643 * Wait for the former clear OWN bit operation
644 * of IDMAC to make sure that this descriptor
645 * isn't still owned by IDMAC as IDMAC's write
646 * ops and CPU's read ops are asynchronous.
647 */
b6d2d81c
SL
648 if (readl_poll_timeout_atomic(&desc->des0, val,
649 !(val & IDMAC_DES0_OWN),
650 10, 100 * USEC_PER_MSEC))
651 goto err_own_bit;
3b2a067b 652
ec0baaa6
SL
653 /*
654 * Set the OWN bit and disable interrupts
655 * for this descriptor
656 */
657 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
658 IDMAC_DES0_CH;
5959b32e 659
ec0baaa6
SL
660 /* Buffer length */
661 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
5959b32e 662
ec0baaa6
SL
663 /* Physical address to DMA to/from */
664 desc->des4 = mem_addr & 0xffffffff;
665 desc->des5 = mem_addr >> 32;
5959b32e 666
ec0baaa6
SL
667 /* Update physical address for the next desc */
668 mem_addr += desc_len;
5959b32e 669
ec0baaa6
SL
670 /* Save pointer to the last descriptor */
671 desc_last = desc;
69d99fdc 672 }
ec0baaa6 673 }
f95f3850 674
ec0baaa6
SL
675 /* Set first descriptor */
676 desc_first->des0 |= IDMAC_DES0_FD;
f95f3850 677
ec0baaa6
SL
678 /* Set last descriptor */
679 desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
680 desc_last->des0 |= IDMAC_DES0_LD;
3b2a067b
SL
681
682 return 0;
683err_own_bit:
684 /* restore the descriptor chain as it's polluted */
26be9d70 685 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
cc190d4c 686 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
3b2a067b
SL
687 dw_mci_idmac_init(host);
688 return -EINVAL;
ec0baaa6 689}
5959b32e 690
69d99fdc 691
3b2a067b 692static inline int dw_mci_prepare_desc32(struct dw_mci *host,
ec0baaa6
SL
693 struct mmc_data *data,
694 unsigned int sg_len)
695{
696 unsigned int desc_len;
697 struct idmac_desc *desc_first, *desc_last, *desc;
b6d2d81c 698 u32 val;
ec0baaa6 699 int i;
0e3a22c0 700
ec0baaa6 701 desc_first = desc_last = desc = host->sg_cpu;
69d99fdc 702
ec0baaa6
SL
703 for (i = 0; i < sg_len; i++) {
704 unsigned int length = sg_dma_len(&data->sg[i]);
5959b32e 705
ec0baaa6 706 u32 mem_addr = sg_dma_address(&data->sg[i]);
5959b32e 707
ec0baaa6
SL
708 for ( ; length ; desc++) {
709 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
710 length : DW_MCI_DESC_DATA_LENGTH;
5959b32e 711
ec0baaa6 712 length -= desc_len;
f95f3850 713
3b2a067b
SL
714 /*
715 * Wait for the former clear OWN bit operation
716 * of IDMAC to make sure that this descriptor
717 * isn't still owned by IDMAC as IDMAC's write
718 * ops and CPU's read ops are asynchronous.
719 */
b6d2d81c
SL
720 if (readl_poll_timeout_atomic(&desc->des0, val,
721 IDMAC_OWN_CLR64(val),
722 10,
723 100 * USEC_PER_MSEC))
724 goto err_own_bit;
3b2a067b 725
ec0baaa6
SL
726 /*
727 * Set the OWN bit and disable interrupts
728 * for this descriptor
729 */
730 desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
731 IDMAC_DES0_DIC |
732 IDMAC_DES0_CH);
5959b32e 733
ec0baaa6
SL
734 /* Buffer length */
735 IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
5959b32e 736
ec0baaa6
SL
737 /* Physical address to DMA to/from */
738 desc->des2 = cpu_to_le32(mem_addr);
69d99fdc 739
ec0baaa6
SL
740 /* Update physical address for the next desc */
741 mem_addr += desc_len;
f95f3850 742
ec0baaa6
SL
743 /* Save pointer to the last descriptor */
744 desc_last = desc;
745 }
69d99fdc 746 }
f95f3850 747
ec0baaa6
SL
748 /* Set first descriptor */
749 desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
750
751 /* Set last descriptor */
752 desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
753 IDMAC_DES0_DIC));
754 desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
3b2a067b
SL
755
756 return 0;
757err_own_bit:
758 /* restore the descriptor chain as it's polluted */
26be9d70 759 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
cc190d4c 760 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
3b2a067b
SL
761 dw_mci_idmac_init(host);
762 return -EINVAL;
f95f3850
WN
763}
764
3fc7eaef 765static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
f95f3850
WN
766{
767 u32 temp;
3b2a067b 768 int ret;
f95f3850 769
ec0baaa6 770 if (host->dma_64bit_address == 1)
3b2a067b 771 ret = dw_mci_prepare_desc64(host, host->data, sg_len);
ec0baaa6 772 else
3b2a067b
SL
773 ret = dw_mci_prepare_desc32(host, host->data, sg_len);
774
775 if (ret)
776 goto out;
ec0baaa6
SL
777
778 /* drain writebuffer */
779 wmb();
f95f3850 780
536f6b91
SR
781 /* Make sure to reset DMA in case we did PIO before this */
782 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
783 dw_mci_idmac_reset(host);
784
f95f3850
WN
785 /* Select IDMAC interface */
786 temp = mci_readl(host, CTRL);
787 temp |= SDMMC_CTRL_USE_IDMAC;
788 mci_writel(host, CTRL, temp);
789
0e3a22c0 790 /* drain writebuffer */
f95f3850
WN
791 wmb();
792
793 /* Enable the IDMAC */
794 temp = mci_readl(host, BMOD);
a5289a43 795 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
f95f3850
WN
796 mci_writel(host, BMOD, temp);
797
798 /* Start it running */
799 mci_writel(host, PLDMND, 1);
3fc7eaef 800
3b2a067b
SL
801out:
802 return ret;
f95f3850
WN
803}
804
8e2b36ea 805static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
885c3e80
SJ
806 .init = dw_mci_idmac_init,
807 .start = dw_mci_idmac_start_dma,
808 .stop = dw_mci_idmac_stop_dma,
3fc7eaef
SL
809 .complete = dw_mci_dmac_complete_dma,
810 .cleanup = dw_mci_dma_cleanup,
811};
812
813static void dw_mci_edmac_stop_dma(struct dw_mci *host)
814{
ab925a31 815 dmaengine_terminate_async(host->dms->ch);
3fc7eaef
SL
816}
817
818static int dw_mci_edmac_start_dma(struct dw_mci *host,
819 unsigned int sg_len)
820{
821 struct dma_slave_config cfg;
822 struct dma_async_tx_descriptor *desc = NULL;
823 struct scatterlist *sgl = host->data->sg;
27d70d36 824 static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
3fc7eaef
SL
825 u32 sg_elems = host->data->sg_len;
826 u32 fifoth_val;
827 u32 fifo_offset = host->fifo_reg - host->regs;
828 int ret = 0;
829
830 /* Set external dma config: burst size, burst width */
260b3164 831 cfg.dst_addr = host->phy_regs + fifo_offset;
3fc7eaef
SL
832 cfg.src_addr = cfg.dst_addr;
833 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
834 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
835
836 /* Match burst msize with external dma config */
837 fifoth_val = mci_readl(host, FIFOTH);
838 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
839 cfg.src_maxburst = cfg.dst_maxburst;
840
841 if (host->data->flags & MMC_DATA_WRITE)
842 cfg.direction = DMA_MEM_TO_DEV;
843 else
844 cfg.direction = DMA_DEV_TO_MEM;
845
846 ret = dmaengine_slave_config(host->dms->ch, &cfg);
847 if (ret) {
848 dev_err(host->dev, "Failed to config edmac.\n");
849 return -EBUSY;
850 }
851
852 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
853 sg_len, cfg.direction,
854 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
855 if (!desc) {
856 dev_err(host->dev, "Can't prepare slave sg.\n");
857 return -EBUSY;
858 }
859
860 /* Set dw_mci_dmac_complete_dma as callback */
861 desc->callback = dw_mci_dmac_complete_dma;
862 desc->callback_param = (void *)host;
863 dmaengine_submit(desc);
864
865 /* Flush cache before write */
866 if (host->data->flags & MMC_DATA_WRITE)
42f989c0 867 dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
3fc7eaef
SL
868 sg_elems, DMA_TO_DEVICE);
869
870 dma_async_issue_pending(host->dms->ch);
871
872 return 0;
873}
874
875static int dw_mci_edmac_init(struct dw_mci *host)
876{
877 /* Request external dma channel */
878 host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
879 if (!host->dms)
880 return -ENOMEM;
881
882 host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
883 if (!host->dms->ch) {
4539d36e 884 dev_err(host->dev, "Failed to get external DMA channel.\n");
3fc7eaef
SL
885 kfree(host->dms);
886 host->dms = NULL;
887 return -ENXIO;
888 }
889
890 return 0;
891}
892
893static void dw_mci_edmac_exit(struct dw_mci *host)
894{
895 if (host->dms) {
896 if (host->dms->ch) {
897 dma_release_channel(host->dms->ch);
898 host->dms->ch = NULL;
899 }
900 kfree(host->dms);
901 host->dms = NULL;
902 }
903}
904
905static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
906 .init = dw_mci_edmac_init,
907 .exit = dw_mci_edmac_exit,
908 .start = dw_mci_edmac_start_dma,
909 .stop = dw_mci_edmac_stop_dma,
910 .complete = dw_mci_dmac_complete_dma,
885c3e80
SJ
911 .cleanup = dw_mci_dma_cleanup,
912};
885c3e80 913
9aa51408
SJ
914static int dw_mci_pre_dma_transfer(struct dw_mci *host,
915 struct mmc_data *data,
a4cc7eb4 916 int cookie)
f95f3850
WN
917{
918 struct scatterlist *sg;
9aa51408 919 unsigned int i, sg_len;
03e8cb53 920
a4cc7eb4
JC
921 if (data->host_cookie == COOKIE_PRE_MAPPED)
922 return data->sg_len;
f95f3850
WN
923
924 /*
925 * We don't do DMA on "complex" transfers, i.e. with
926 * non-word-aligned buffers or lengths. Also, we don't bother
927 * with all the DMA setup overhead for short transfers.
928 */
929 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
930 return -EINVAL;
9aa51408 931
f95f3850
WN
932 if (data->blksz & 3)
933 return -EINVAL;
934
935 for_each_sg(data->sg, sg, data->sg_len, i) {
936 if (sg->offset & 3 || sg->length & 3)
937 return -EINVAL;
938 }
939
4a90920c 940 sg_len = dma_map_sg(host->dev,
9aa51408
SJ
941 data->sg,
942 data->sg_len,
feeef096 943 mmc_get_dma_dir(data));
9aa51408
SJ
944 if (sg_len == 0)
945 return -EINVAL;
03e8cb53 946
a4cc7eb4 947 data->host_cookie = cookie;
f95f3850 948
9aa51408
SJ
949 return sg_len;
950}
951
9aa51408 952static void dw_mci_pre_req(struct mmc_host *mmc,
d3c6aac3 953 struct mmc_request *mrq)
9aa51408
SJ
954{
955 struct dw_mci_slot *slot = mmc_priv(mmc);
956 struct mmc_data *data = mrq->data;
957
958 if (!slot->host->use_dma || !data)
959 return;
960
a4cc7eb4
JC
961 /* This data might be unmapped at this time */
962 data->host_cookie = COOKIE_UNMAPPED;
9aa51408 963
a4cc7eb4
JC
964 if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
965 COOKIE_PRE_MAPPED) < 0)
966 data->host_cookie = COOKIE_UNMAPPED;
9aa51408
SJ
967}
968
969static void dw_mci_post_req(struct mmc_host *mmc,
970 struct mmc_request *mrq,
971 int err)
972{
973 struct dw_mci_slot *slot = mmc_priv(mmc);
974 struct mmc_data *data = mrq->data;
975
976 if (!slot->host->use_dma || !data)
977 return;
978
a4cc7eb4 979 if (data->host_cookie != COOKIE_UNMAPPED)
4a90920c 980 dma_unmap_sg(slot->host->dev,
9aa51408
SJ
981 data->sg,
982 data->sg_len,
feeef096 983 mmc_get_dma_dir(data));
a4cc7eb4 984 data->host_cookie = COOKIE_UNMAPPED;
9aa51408
SJ
985}
986
671fa142
SL
987static int dw_mci_get_cd(struct mmc_host *mmc)
988{
989 int present;
990 struct dw_mci_slot *slot = mmc_priv(mmc);
991 struct dw_mci *host = slot->host;
992 int gpio_cd = mmc_gpio_get_cd(mmc);
993
994 /* Use platform get_cd function, else try onboard card detect */
995 if (((mmc->caps & MMC_CAP_NEEDS_POLL)
996 || !mmc_card_is_removable(mmc))) {
997 present = 1;
998
999 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1000 if (mmc->caps & MMC_CAP_NEEDS_POLL) {
1001 dev_info(&mmc->class_dev,
1002 "card is polling.\n");
1003 } else {
1004 dev_info(&mmc->class_dev,
1005 "card is non-removable.\n");
1006 }
1007 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1008 }
1009
1010 return present;
1011 } else if (gpio_cd >= 0)
1012 present = gpio_cd;
1013 else
1014 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1015 == 0 ? 1 : 0;
1016
1017 spin_lock_bh(&host->lock);
1018 if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
1019 dev_dbg(&mmc->class_dev, "card is present\n");
1020 else if (!present &&
1021 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
1022 dev_dbg(&mmc->class_dev, "card is not present\n");
1023 spin_unlock_bh(&host->lock);
1024
1025 return present;
1026}
1027
52426899
SJ
1028static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
1029{
52426899 1030 unsigned int blksz = data->blksz;
27d70d36 1031 static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
52426899
SJ
1032 u32 fifo_width = 1 << host->data_shift;
1033 u32 blksz_depth = blksz / fifo_width, fifoth_val;
1034 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
0e3a22c0 1035 int idx = ARRAY_SIZE(mszs) - 1;
52426899 1036
3fc7eaef
SL
1037 /* pio should ship this scenario */
1038 if (!host->use_dma)
1039 return;
1040
52426899
SJ
1041 tx_wmark = (host->fifo_depth) / 2;
1042 tx_wmark_invers = host->fifo_depth - tx_wmark;
1043
1044 /*
1045 * MSIZE is '1',
1046 * if blksz is not a multiple of the FIFO width
1047 */
20753569 1048 if (blksz % fifo_width)
52426899 1049 goto done;
52426899
SJ
1050
1051 do {
1052 if (!((blksz_depth % mszs[idx]) ||
1053 (tx_wmark_invers % mszs[idx]))) {
1054 msize = idx;
1055 rx_wmark = mszs[idx] - 1;
1056 break;
1057 }
1058 } while (--idx > 0);
1059 /*
1060 * If idx is '0', it won't be tried
1061 * Thus, initial values are uesed
1062 */
1063done:
1064 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
1065 mci_writel(host, FIFOTH, fifoth_val);
52426899
SJ
1066}
1067
7e4bf1bc 1068static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
f1d2736c
SJ
1069{
1070 unsigned int blksz = data->blksz;
1071 u32 blksz_depth, fifo_depth;
1072 u16 thld_size;
7e4bf1bc 1073 u8 enable;
f1d2736c 1074
66dfd101
JH
1075 /*
1076 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
1077 * in the FIFO region, so we really shouldn't access it).
1078 */
7e4bf1bc
JC
1079 if (host->verid < DW_MMC_240A ||
1080 (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
1081 return;
1082
1083 /*
1084 * Card write Threshold is introduced since 2.80a
1085 * It's used when HS400 mode is enabled.
1086 */
1087 if (data->flags & MMC_DATA_WRITE &&
1088 !(host->timing != MMC_TIMING_MMC_HS400))
66dfd101
JH
1089 return;
1090
7e4bf1bc
JC
1091 if (data->flags & MMC_DATA_WRITE)
1092 enable = SDMMC_CARD_WR_THR_EN;
1093 else
1094 enable = SDMMC_CARD_RD_THR_EN;
1095
f1d2736c
SJ
1096 if (host->timing != MMC_TIMING_MMC_HS200 &&
1097 host->timing != MMC_TIMING_UHS_SDR104)
1098 goto disable;
1099
1100 blksz_depth = blksz / (1 << host->data_shift);
1101 fifo_depth = host->fifo_depth;
1102
1103 if (blksz_depth > fifo_depth)
1104 goto disable;
1105
1106 /*
1107 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
1108 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
1109 * Currently just choose blksz.
1110 */
1111 thld_size = blksz;
7e4bf1bc 1112 mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
f1d2736c
SJ
1113 return;
1114
1115disable:
7e4bf1bc 1116 mci_writel(host, CDTHRCTL, 0);
f1d2736c
SJ
1117}
1118
9aa51408
SJ
1119static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
1120{
f8c58c11 1121 unsigned long irqflags;
9aa51408
SJ
1122 int sg_len;
1123 u32 temp;
1124
1125 host->using_dma = 0;
1126
1127 /* If we don't have a channel, we can't do DMA */
1128 if (!host->use_dma)
1129 return -ENODEV;
1130
a4cc7eb4 1131 sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
a99aa9b9
SJ
1132 if (sg_len < 0) {
1133 host->dma_ops->stop(host);
9aa51408 1134 return sg_len;
a99aa9b9 1135 }
9aa51408
SJ
1136
1137 host->using_dma = 1;
f95f3850 1138
3fc7eaef
SL
1139 if (host->use_dma == TRANS_MODE_IDMAC)
1140 dev_vdbg(host->dev,
1141 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
1142 (unsigned long)host->sg_cpu,
1143 (unsigned long)host->sg_dma,
1144 sg_len);
f95f3850 1145
52426899
SJ
1146 /*
1147 * Decide the MSIZE and RX/TX Watermark.
1148 * If current block size is same with previous size,
1149 * no need to update fifoth.
1150 */
1151 if (host->prev_blksz != data->blksz)
1152 dw_mci_adjust_fifoth(host, data);
1153
f95f3850
WN
1154 /* Enable the DMA interface */
1155 temp = mci_readl(host, CTRL);
1156 temp |= SDMMC_CTRL_DMA_ENABLE;
1157 mci_writel(host, CTRL, temp);
1158
1159 /* Disable RX/TX IRQs, let DMA handle it */
f8c58c11 1160 spin_lock_irqsave(&host->irq_lock, irqflags);
f95f3850
WN
1161 temp = mci_readl(host, INTMASK);
1162 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
1163 mci_writel(host, INTMASK, temp);
f8c58c11 1164 spin_unlock_irqrestore(&host->irq_lock, irqflags);
f95f3850 1165
3fc7eaef 1166 if (host->dma_ops->start(host, sg_len)) {
647f80a1 1167 host->dma_ops->stop(host);
d12d0cb1
SL
1168 /* We can't do DMA, try PIO for this one */
1169 dev_dbg(host->dev,
1170 "%s: fall back to PIO mode for current transfer\n",
1171 __func__);
3fc7eaef
SL
1172 return -ENODEV;
1173 }
f95f3850
WN
1174
1175 return 0;
1176}
1177
1178static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
1179{
f8c58c11 1180 unsigned long irqflags;
0e3a22c0 1181 int flags = SG_MITER_ATOMIC;
f95f3850
WN
1182 u32 temp;
1183
1184 data->error = -EINPROGRESS;
1185
1186 WARN_ON(host->data);
1187 host->sg = NULL;
1188 host->data = data;
1189
7e4bf1bc 1190 if (data->flags & MMC_DATA_READ)
55c5efbc 1191 host->dir_status = DW_MCI_RECV_STATUS;
7e4bf1bc 1192 else
55c5efbc 1193 host->dir_status = DW_MCI_SEND_STATUS;
7e4bf1bc
JC
1194
1195 dw_mci_ctrl_thld(host, data);
55c5efbc 1196
f95f3850 1197 if (dw_mci_submit_data_dma(host, data)) {
f9c2a0dc
SJ
1198 if (host->data->flags & MMC_DATA_READ)
1199 flags |= SG_MITER_TO_SG;
1200 else
1201 flags |= SG_MITER_FROM_SG;
1202
1203 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
f95f3850 1204 host->sg = data->sg;
34b664a2
JH
1205 host->part_buf_start = 0;
1206 host->part_buf_count = 0;
f95f3850 1207
b40af3aa 1208 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
f8c58c11
DA
1209
1210 spin_lock_irqsave(&host->irq_lock, irqflags);
f95f3850
WN
1211 temp = mci_readl(host, INTMASK);
1212 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1213 mci_writel(host, INTMASK, temp);
f8c58c11 1214 spin_unlock_irqrestore(&host->irq_lock, irqflags);
f95f3850
WN
1215
1216 temp = mci_readl(host, CTRL);
1217 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1218 mci_writel(host, CTRL, temp);
52426899
SJ
1219
1220 /*
d6fced83
JN
1221 * Use the initial fifoth_val for PIO mode. If wm_algined
1222 * is set, we set watermark same as data size.
52426899
SJ
1223 * If next issued data may be transfered by DMA mode,
1224 * prev_blksz should be invalidated.
1225 */
d6fced83
JN
1226 if (host->wm_aligned)
1227 dw_mci_adjust_fifoth(host, data);
1228 else
1229 mci_writel(host, FIFOTH, host->fifoth_val);
52426899
SJ
1230 host->prev_blksz = 0;
1231 } else {
1232 /*
1233 * Keep the current block size.
1234 * It will be used to decide whether to update
1235 * fifoth register next time.
1236 */
1237 host->prev_blksz = data->blksz;
f95f3850
WN
1238 }
1239}
1240
ab269128 1241static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
f95f3850
WN
1242{
1243 struct dw_mci *host = slot->host;
fdf492a1 1244 unsigned int clock = slot->clock;
f95f3850 1245 u32 div;
9623b5b9 1246 u32 clk_en_a;
01730558
DA
1247 u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
1248
1249 /* We must continue to set bit 28 in CMD until the change is complete */
1250 if (host->state == STATE_WAITING_CMD11_DONE)
1251 sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
f95f3850 1252
fdf492a1
DA
1253 if (!clock) {
1254 mci_writel(host, CLKENA, 0);
01730558 1255 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
fdf492a1
DA
1256 } else if (clock != host->current_speed || force_clkinit) {
1257 div = host->bus_hz / clock;
1258 if (host->bus_hz % clock && host->bus_hz > clock)
f95f3850
WN
1259 /*
1260 * move the + 1 after the divide to prevent
1261 * over-clocking the card.
1262 */
e419990b
SJ
1263 div += 1;
1264
fdf492a1 1265 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
f95f3850 1266
e6cd7a8e
JC
1267 if ((clock != slot->__clk_old &&
1268 !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
1269 force_clkinit) {
ce69e2fe
SL
1270 /* Silent the verbose log if calling from PM context */
1271 if (!force_clkinit)
1272 dev_info(&slot->mmc->class_dev,
1273 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1274 slot->id, host->bus_hz, clock,
1275 div ? ((host->bus_hz / div) >> 1) :
1276 host->bus_hz, div);
f95f3850 1277
e6cd7a8e
JC
1278 /*
1279 * If card is polling, display the message only
1280 * one time at boot time.
1281 */
1282 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
1283 slot->mmc->f_min == clock)
1284 set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
1285 }
1286
f95f3850
WN
1287 /* disable clock */
1288 mci_writel(host, CLKENA, 0);
1289 mci_writel(host, CLKSRC, 0);
1290
1291 /* inform CIU */
01730558 1292 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
f95f3850
WN
1293
1294 /* set clock to desired speed */
1295 mci_writel(host, CLKDIV, div);
1296
1297 /* inform CIU */
01730558 1298 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
f95f3850 1299
9623b5b9
DA
1300 /* enable clock; only low power if no SDIO */
1301 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
b24c8b26 1302 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
9623b5b9
DA
1303 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1304 mci_writel(host, CLKENA, clk_en_a);
f95f3850
WN
1305
1306 /* inform CIU */
01730558 1307 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
005d675a
JC
1308
1309 /* keep the last clock value that was requested from core */
1310 slot->__clk_old = clock;
f95f3850
WN
1311 }
1312
fdf492a1
DA
1313 host->current_speed = clock;
1314
f95f3850 1315 /* Set the current slot bus width */
1d56c453 1316 mci_writel(host, CTYPE, (slot->ctype << slot->id));
f95f3850
WN
1317}
1318
053b3ce6
SJ
1319static void __dw_mci_start_request(struct dw_mci *host,
1320 struct dw_mci_slot *slot,
1321 struct mmc_command *cmd)
f95f3850
WN
1322{
1323 struct mmc_request *mrq;
f95f3850
WN
1324 struct mmc_data *data;
1325 u32 cmdflags;
1326
1327 mrq = slot->mrq;
f95f3850 1328
f95f3850
WN
1329 host->mrq = mrq;
1330
1331 host->pending_events = 0;
1332 host->completed_events = 0;
e352c813 1333 host->cmd_status = 0;
f95f3850 1334 host->data_status = 0;
e352c813 1335 host->dir_status = 0;
f95f3850 1336
053b3ce6 1337 data = cmd->data;
f95f3850 1338 if (data) {
f16afa88 1339 mci_writel(host, TMOUT, 0xFFFFFFFF);
f95f3850
WN
1340 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1341 mci_writel(host, BLKSIZ, data->blksz);
1342 }
1343
f95f3850
WN
1344 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1345
1346 /* this is the first command, send the initialization clock */
1347 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1348 cmdflags |= SDMMC_CMD_INIT;
1349
1350 if (data) {
1351 dw_mci_submit_data(host, data);
0e3a22c0 1352 wmb(); /* drain writebuffer */
f95f3850
WN
1353 }
1354
1355 dw_mci_start_command(host, cmd, cmdflags);
1356
5c935165 1357 if (cmd->opcode == SD_SWITCH_VOLTAGE) {
49ba0302
DA
1358 unsigned long irqflags;
1359
5c935165 1360 /*
8886a6fd
DA
1361 * Databook says to fail after 2ms w/ no response, but evidence
1362 * shows that sometimes the cmd11 interrupt takes over 130ms.
1363 * We'll set to 500ms, plus an extra jiffy just in case jiffies
1364 * is just about to roll over.
49ba0302
DA
1365 *
1366 * We do this whole thing under spinlock and only if the
1367 * command hasn't already completed (indicating the the irq
1368 * already ran so we don't want the timeout).
5c935165 1369 */
49ba0302
DA
1370 spin_lock_irqsave(&host->irq_lock, irqflags);
1371 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1372 mod_timer(&host->cmd11_timer,
1373 jiffies + msecs_to_jiffies(500) + 1);
1374 spin_unlock_irqrestore(&host->irq_lock, irqflags);
5c935165
DA
1375 }
1376
e13c3c08 1377 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
f95f3850
WN
1378}
1379
053b3ce6
SJ
1380static void dw_mci_start_request(struct dw_mci *host,
1381 struct dw_mci_slot *slot)
1382{
1383 struct mmc_request *mrq = slot->mrq;
1384 struct mmc_command *cmd;
1385
1386 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1387 __dw_mci_start_request(host, slot, cmd);
1388}
1389
7456caae 1390/* must be called with host->lock held */
f95f3850
WN
1391static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1392 struct mmc_request *mrq)
1393{
1394 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1395 host->state);
1396
f95f3850
WN
1397 slot->mrq = mrq;
1398
01730558
DA
1399 if (host->state == STATE_WAITING_CMD11_DONE) {
1400 dev_warn(&slot->mmc->class_dev,
1401 "Voltage change didn't complete\n");
1402 /*
1403 * this case isn't expected to happen, so we can
1404 * either crash here or just try to continue on
1405 * in the closest possible state
1406 */
1407 host->state = STATE_IDLE;
1408 }
1409
f95f3850
WN
1410 if (host->state == STATE_IDLE) {
1411 host->state = STATE_SENDING_CMD;
1412 dw_mci_start_request(host, slot);
1413 } else {
1414 list_add_tail(&slot->queue_node, &host->queue);
1415 }
f95f3850
WN
1416}
1417
1418static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1419{
1420 struct dw_mci_slot *slot = mmc_priv(mmc);
1421 struct dw_mci *host = slot->host;
1422
1423 WARN_ON(slot->mrq);
1424
7456caae
JH
1425 /*
1426 * The check for card presence and queueing of the request must be
1427 * atomic, otherwise the card could be removed in between and the
1428 * request wouldn't fail until another card was inserted.
1429 */
7456caae 1430
56f6911c 1431 if (!dw_mci_get_cd(mmc)) {
f95f3850
WN
1432 mrq->cmd->error = -ENOMEDIUM;
1433 mmc_request_done(mmc, mrq);
1434 return;
1435 }
1436
56f6911c
SL
1437 spin_lock_bh(&host->lock);
1438
f95f3850 1439 dw_mci_queue_request(host, slot, mrq);
7456caae
JH
1440
1441 spin_unlock_bh(&host->lock);
f95f3850
WN
1442}
1443
1444static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1445{
1446 struct dw_mci_slot *slot = mmc_priv(mmc);
e95baf13 1447 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
41babf75 1448 u32 regs;
51da2240 1449 int ret;
f95f3850 1450
f95f3850 1451 switch (ios->bus_width) {
f95f3850
WN
1452 case MMC_BUS_WIDTH_4:
1453 slot->ctype = SDMMC_CTYPE_4BIT;
1454 break;
c9b2a06f
JC
1455 case MMC_BUS_WIDTH_8:
1456 slot->ctype = SDMMC_CTYPE_8BIT;
1457 break;
b2f7cb45
JC
1458 default:
1459 /* set default 1 bit mode */
1460 slot->ctype = SDMMC_CTYPE_1BIT;
f95f3850
WN
1461 }
1462
3f514291
SJ
1463 regs = mci_readl(slot->host, UHS_REG);
1464
41babf75 1465 /* DDR mode set */
80113132 1466 if (ios->timing == MMC_TIMING_MMC_DDR52 ||
7cc8d580 1467 ios->timing == MMC_TIMING_UHS_DDR50 ||
80113132 1468 ios->timing == MMC_TIMING_MMC_HS400)
c69042a5 1469 regs |= ((0x1 << slot->id) << 16);
3f514291 1470 else
c69042a5 1471 regs &= ~((0x1 << slot->id) << 16);
3f514291
SJ
1472
1473 mci_writel(slot->host, UHS_REG, regs);
f1d2736c 1474 slot->host->timing = ios->timing;
41babf75 1475
fdf492a1
DA
1476 /*
1477 * Use mirror of ios->clock to prevent race with mmc
1478 * core ios update when finding the minimum.
1479 */
1480 slot->clock = ios->clock;
f95f3850 1481
cb27a843
JH
1482 if (drv_data && drv_data->set_ios)
1483 drv_data->set_ios(slot->host, ios);
800d78bf 1484
f95f3850
WN
1485 switch (ios->power_mode) {
1486 case MMC_POWER_UP:
51da2240
YC
1487 if (!IS_ERR(mmc->supply.vmmc)) {
1488 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1489 ios->vdd);
1490 if (ret) {
1491 dev_err(slot->host->dev,
1492 "failed to enable vmmc regulator\n");
1493 /*return, if failed turn on vmmc*/
1494 return;
1495 }
1496 }
29d0d161
DA
1497 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1498 regs = mci_readl(slot->host, PWREN);
1499 regs |= (1 << slot->id);
1500 mci_writel(slot->host, PWREN, regs);
1501 break;
1502 case MMC_POWER_ON:
d1f1dd86
DA
1503 if (!slot->host->vqmmc_enabled) {
1504 if (!IS_ERR(mmc->supply.vqmmc)) {
1505 ret = regulator_enable(mmc->supply.vqmmc);
1506 if (ret < 0)
1507 dev_err(slot->host->dev,
1508 "failed to enable vqmmc\n");
1509 else
1510 slot->host->vqmmc_enabled = true;
1511
1512 } else {
1513 /* Keep track so we don't reset again */
51da2240 1514 slot->host->vqmmc_enabled = true;
d1f1dd86
DA
1515 }
1516
1517 /* Reset our state machine after powering on */
1518 dw_mci_ctrl_reset(slot->host,
1519 SDMMC_CTRL_ALL_RESET_FLAGS);
51da2240 1520 }
655babbd
DA
1521
1522 /* Adjust clock / bus width after power is up */
1523 dw_mci_setup_bus(slot, false);
1524
e6f34e2f
JH
1525 break;
1526 case MMC_POWER_OFF:
655babbd
DA
1527 /* Turn clock off before power goes down */
1528 dw_mci_setup_bus(slot, false);
1529
51da2240
YC
1530 if (!IS_ERR(mmc->supply.vmmc))
1531 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1532
d1f1dd86 1533 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
51da2240 1534 regulator_disable(mmc->supply.vqmmc);
d1f1dd86 1535 slot->host->vqmmc_enabled = false;
51da2240 1536
4366dcc5
JC
1537 regs = mci_readl(slot->host, PWREN);
1538 regs &= ~(1 << slot->id);
1539 mci_writel(slot->host, PWREN, regs);
f95f3850
WN
1540 break;
1541 default:
1542 break;
1543 }
655babbd
DA
1544
1545 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
1546 slot->host->state = STATE_IDLE;
f95f3850
WN
1547}
1548
01730558
DA
1549static int dw_mci_card_busy(struct mmc_host *mmc)
1550{
1551 struct dw_mci_slot *slot = mmc_priv(mmc);
1552 u32 status;
1553
1554 /*
1555 * Check the busy bit which is low when DAT[3:0]
1556 * (the data lines) are 0000
1557 */
1558 status = mci_readl(slot->host, STATUS);
1559
1560 return !!(status & SDMMC_STATUS_BUSY);
1561}
1562
1563static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1564{
1565 struct dw_mci_slot *slot = mmc_priv(mmc);
1566 struct dw_mci *host = slot->host;
8f7849c4 1567 const struct dw_mci_drv_data *drv_data = host->drv_data;
01730558
DA
1568 u32 uhs;
1569 u32 v18 = SDMMC_UHS_18V << slot->id;
01730558
DA
1570 int ret;
1571
8f7849c4
ZG
1572 if (drv_data && drv_data->switch_voltage)
1573 return drv_data->switch_voltage(mmc, ios);
1574
01730558
DA
1575 /*
1576 * Program the voltage. Note that some instances of dw_mmc may use
1577 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
1578 * does no harm but you need to set the regulator directly. Try both.
1579 */
1580 uhs = mci_readl(host, UHS_REG);
e0848f5d 1581 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
01730558 1582 uhs &= ~v18;
e0848f5d 1583 else
01730558 1584 uhs |= v18;
e0848f5d 1585
01730558 1586 if (!IS_ERR(mmc->supply.vqmmc)) {
e0848f5d 1587 ret = mmc_regulator_set_vqmmc(mmc, ios);
01730558
DA
1588
1589 if (ret) {
b19caf37 1590 dev_dbg(&mmc->class_dev,
e0848f5d
DA
1591 "Regulator set error %d - %s V\n",
1592 ret, uhs & v18 ? "1.8" : "3.3");
01730558
DA
1593 return ret;
1594 }
1595 }
1596 mci_writel(host, UHS_REG, uhs);
1597
1598 return 0;
1599}
1600
f95f3850
WN
1601static int dw_mci_get_ro(struct mmc_host *mmc)
1602{
1603 int read_only;
1604 struct dw_mci_slot *slot = mmc_priv(mmc);
9795a846 1605 int gpio_ro = mmc_gpio_get_ro(mmc);
f95f3850
WN
1606
1607 /* Use platform get_ro function, else try on board write protect */
287980e4 1608 if (gpio_ro >= 0)
9795a846 1609 read_only = gpio_ro;
f95f3850
WN
1610 else
1611 read_only =
1612 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1613
1614 dev_dbg(&mmc->class_dev, "card is %s\n",
1615 read_only ? "read-only" : "read-write");
1616
1617 return read_only;
1618}
1619
935a665e
SL
1620static void dw_mci_hw_reset(struct mmc_host *mmc)
1621{
1622 struct dw_mci_slot *slot = mmc_priv(mmc);
1623 struct dw_mci *host = slot->host;
1624 int reset;
1625
1626 if (host->use_dma == TRANS_MODE_IDMAC)
1627 dw_mci_idmac_reset(host);
1628
1629 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
1630 SDMMC_CTRL_FIFO_RESET))
1631 return;
1632
1633 /*
1634 * According to eMMC spec, card reset procedure:
1635 * tRstW >= 1us: RST_n pulse width
1636 * tRSCA >= 200us: RST_n to Command time
1637 * tRSTH >= 1us: RST_n high period
1638 */
1639 reset = mci_readl(host, RST_N);
1640 reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
1641 mci_writel(host, RST_N, reset);
1642 usleep_range(1, 2);
1643 reset |= SDMMC_RST_HWACTIVE << slot->id;
1644 mci_writel(host, RST_N, reset);
1645 usleep_range(200, 300);
1646}
1647
b24c8b26 1648static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
9623b5b9 1649{
b24c8b26 1650 struct dw_mci_slot *slot = mmc_priv(mmc);
9623b5b9 1651 struct dw_mci *host = slot->host;
9623b5b9 1652
b24c8b26
DA
1653 /*
1654 * Low power mode will stop the card clock when idle. According to the
1655 * description of the CLKENA register we should disable low power mode
1656 * for SDIO cards if we need SDIO interrupts to work.
1657 */
1658 if (mmc->caps & MMC_CAP_SDIO_IRQ) {
1659 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1660 u32 clk_en_a_old;
1661 u32 clk_en_a;
9623b5b9 1662
b24c8b26
DA
1663 clk_en_a_old = mci_readl(host, CLKENA);
1664
1665 if (card->type == MMC_TYPE_SDIO ||
1666 card->type == MMC_TYPE_SD_COMBO) {
0eebf9b9 1667 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
b24c8b26
DA
1668 clk_en_a = clk_en_a_old & ~clken_low_pwr;
1669 } else {
0eebf9b9 1670 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
b24c8b26
DA
1671 clk_en_a = clk_en_a_old | clken_low_pwr;
1672 }
1673
1674 if (clk_en_a != clk_en_a_old) {
1675 mci_writel(host, CLKENA, clk_en_a);
1676 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1677 SDMMC_CMD_PRV_DAT_WAIT, 0);
1678 }
9623b5b9
DA
1679 }
1680}
1681
32dba737 1682static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
1a5c8e1f 1683{
1a5c8e1f 1684 struct dw_mci *host = slot->host;
f8c58c11 1685 unsigned long irqflags;
1a5c8e1f
SH
1686 u32 int_mask;
1687
f8c58c11
DA
1688 spin_lock_irqsave(&host->irq_lock, irqflags);
1689
1a5c8e1f
SH
1690 /* Enable/disable Slot Specific SDIO interrupt */
1691 int_mask = mci_readl(host, INTMASK);
b24c8b26
DA
1692 if (enb)
1693 int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
1694 else
1695 int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
1696 mci_writel(host, INTMASK, int_mask);
f8c58c11
DA
1697
1698 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1a5c8e1f
SH
1699}
1700
32dba737
UH
1701static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1702{
1703 struct dw_mci_slot *slot = mmc_priv(mmc);
ca8971ca 1704 struct dw_mci *host = slot->host;
32dba737
UH
1705
1706 __dw_mci_enable_sdio_irq(slot, enb);
ca8971ca
UH
1707
1708 /* Avoid runtime suspending the device when SDIO IRQ is enabled */
1709 if (enb)
1710 pm_runtime_get_noresume(host->dev);
1711 else
1712 pm_runtime_put_noidle(host->dev);
32dba737
UH
1713}
1714
1715static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
1716{
1717 struct dw_mci_slot *slot = mmc_priv(mmc);
1718
1719 __dw_mci_enable_sdio_irq(slot, 1);
1720}
1721
0976f16d
SJ
1722static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1723{
1724 struct dw_mci_slot *slot = mmc_priv(mmc);
1725 struct dw_mci *host = slot->host;
1726 const struct dw_mci_drv_data *drv_data = host->drv_data;
0e3a22c0 1727 int err = -EINVAL;
0976f16d 1728
0976f16d 1729 if (drv_data && drv_data->execute_tuning)
9979dbe5 1730 err = drv_data->execute_tuning(slot, opcode);
0976f16d
SJ
1731 return err;
1732}
1733
0e3a22c0
SL
1734static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
1735 struct mmc_ios *ios)
80113132
SJ
1736{
1737 struct dw_mci_slot *slot = mmc_priv(mmc);
1738 struct dw_mci *host = slot->host;
1739 const struct dw_mci_drv_data *drv_data = host->drv_data;
1740
1741 if (drv_data && drv_data->prepare_hs400_tuning)
1742 return drv_data->prepare_hs400_tuning(host, ios);
1743
1744 return 0;
1745}
1746
4e7392b2
SL
1747static bool dw_mci_reset(struct dw_mci *host)
1748{
1749 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
1750 bool ret = false;
bc2dcc1a 1751 u32 status = 0;
4e7392b2
SL
1752
1753 /*
1754 * Resetting generates a block interrupt, hence setting
1755 * the scatter-gather pointer to NULL.
1756 */
1757 if (host->sg) {
1758 sg_miter_stop(&host->sg_miter);
1759 host->sg = NULL;
1760 }
1761
1762 if (host->use_dma)
1763 flags |= SDMMC_CTRL_DMA_RESET;
1764
1765 if (dw_mci_ctrl_reset(host, flags)) {
1766 /*
bc2dcc1a
SL
1767 * In all cases we clear the RAWINTS
1768 * register to clear any interrupts.
4e7392b2
SL
1769 */
1770 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1771
bc2dcc1a
SL
1772 if (!host->use_dma) {
1773 ret = true;
1774 goto ciu_out;
1775 }
4e7392b2 1776
bc2dcc1a
SL
1777 /* Wait for dma_req to be cleared */
1778 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
1779 status,
1780 !(status & SDMMC_STATUS_DMA_REQ),
1781 1, 500 * USEC_PER_MSEC)) {
1782 dev_err(host->dev,
1783 "%s: Timeout waiting for dma_req to be cleared\n",
1784 __func__);
1785 goto ciu_out;
4e7392b2 1786 }
bc2dcc1a
SL
1787
1788 /* when using DMA next we reset the fifo again */
1789 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
1790 goto ciu_out;
4e7392b2
SL
1791 } else {
1792 /* if the controller reset bit did clear, then set clock regs */
1793 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
1794 dev_err(host->dev,
1795 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1796 __func__);
1797 goto ciu_out;
1798 }
1799 }
1800
1801 if (host->use_dma == TRANS_MODE_IDMAC)
1802 /* It is also recommended that we reset and reprogram idmac */
1803 dw_mci_idmac_reset(host);
1804
1805 ret = true;
1806
1807ciu_out:
1808 /* After a CTRL reset we need to have CIU set clock registers */
42f989c0 1809 mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
4e7392b2
SL
1810
1811 return ret;
1812}
1813
f95f3850 1814static const struct mmc_host_ops dw_mci_ops = {
1a5c8e1f 1815 .request = dw_mci_request,
9aa51408
SJ
1816 .pre_req = dw_mci_pre_req,
1817 .post_req = dw_mci_post_req,
1a5c8e1f
SH
1818 .set_ios = dw_mci_set_ios,
1819 .get_ro = dw_mci_get_ro,
1820 .get_cd = dw_mci_get_cd,
935a665e 1821 .hw_reset = dw_mci_hw_reset,
1a5c8e1f 1822 .enable_sdio_irq = dw_mci_enable_sdio_irq,
32dba737 1823 .ack_sdio_irq = dw_mci_ack_sdio_irq,
0976f16d 1824 .execute_tuning = dw_mci_execute_tuning,
01730558
DA
1825 .card_busy = dw_mci_card_busy,
1826 .start_signal_voltage_switch = dw_mci_switch_voltage,
b24c8b26 1827 .init_card = dw_mci_init_card,
80113132 1828 .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
f95f3850
WN
1829};
1830
1831static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1832 __releases(&host->lock)
1833 __acquires(&host->lock)
1834{
1835 struct dw_mci_slot *slot;
42f989c0 1836 struct mmc_host *prev_mmc = host->slot->mmc;
f95f3850
WN
1837
1838 WARN_ON(host->cmd || host->data);
1839
42f989c0 1840 host->slot->mrq = NULL;
f95f3850
WN
1841 host->mrq = NULL;
1842 if (!list_empty(&host->queue)) {
1843 slot = list_entry(host->queue.next,
1844 struct dw_mci_slot, queue_node);
1845 list_del(&slot->queue_node);
4a90920c 1846 dev_vdbg(host->dev, "list not empty: %s is next\n",
f95f3850
WN
1847 mmc_hostname(slot->mmc));
1848 host->state = STATE_SENDING_CMD;
1849 dw_mci_start_request(host, slot);
1850 } else {
4a90920c 1851 dev_vdbg(host->dev, "list empty\n");
01730558
DA
1852
1853 if (host->state == STATE_SENDING_CMD11)
1854 host->state = STATE_WAITING_CMD11_DONE;
1855 else
1856 host->state = STATE_IDLE;
f95f3850
WN
1857 }
1858
1859 spin_unlock(&host->lock);
1860 mmc_request_done(prev_mmc, mrq);
1861 spin_lock(&host->lock);
1862}
1863
e352c813 1864static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
f95f3850
WN
1865{
1866 u32 status = host->cmd_status;
1867
1868 host->cmd_status = 0;
1869
1870 /* Read the response from the card (up to 16 bytes) */
1871 if (cmd->flags & MMC_RSP_PRESENT) {
1872 if (cmd->flags & MMC_RSP_136) {
1873 cmd->resp[3] = mci_readl(host, RESP0);
1874 cmd->resp[2] = mci_readl(host, RESP1);
1875 cmd->resp[1] = mci_readl(host, RESP2);
1876 cmd->resp[0] = mci_readl(host, RESP3);
1877 } else {
1878 cmd->resp[0] = mci_readl(host, RESP0);
1879 cmd->resp[1] = 0;
1880 cmd->resp[2] = 0;
1881 cmd->resp[3] = 0;
1882 }
1883 }
1884
1885 if (status & SDMMC_INT_RTO)
1886 cmd->error = -ETIMEDOUT;
1887 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1888 cmd->error = -EILSEQ;
1889 else if (status & SDMMC_INT_RESP_ERR)
1890 cmd->error = -EIO;
1891 else
1892 cmd->error = 0;
1893
e352c813
SJ
1894 return cmd->error;
1895}
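/*
 * Note on the response copy above: for 136-bit (R2) responses the mmc
 * core expects cmd->resp[0] to carry the most significant 32 bits,
 * while this controller latches the least significant word in RESP0,
 * hence the cross-mapping (resp[3] = RESP0 ... resp[0] = RESP3).
 */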
1896
1897static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1898{
31bff450 1899 u32 status = host->data_status;
e352c813
SJ
1900
1901 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1902 if (status & SDMMC_INT_DRTO) {
1903 data->error = -ETIMEDOUT;
1904 } else if (status & SDMMC_INT_DCRC) {
1905 data->error = -EILSEQ;
1906 } else if (status & SDMMC_INT_EBE) {
1907 if (host->dir_status ==
1908 DW_MCI_SEND_STATUS) {
1909 /*
1910 * No data CRC status was returned.
1911 * The number of bytes transferred
1912 * will be exaggerated in PIO mode.
1913 */
1914 data->bytes_xfered = 0;
1915 data->error = -ETIMEDOUT;
1916 } else if (host->dir_status ==
1917 DW_MCI_RECV_STATUS) {
e7a1dec1 1918 data->error = -EILSEQ;
e352c813
SJ
1919 }
1920 } else {
1921 /* SDMMC_INT_SBE is included */
e7a1dec1 1922 data->error = -EILSEQ;
e352c813
SJ
1923 }
1924
e6cc0123 1925 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
e352c813
SJ
1926
1927 /*
1928 * After an error, there may be data lingering
31bff450 1929 * in the FIFO
e352c813 1930 */
3a33a94c 1931 dw_mci_reset(host);
e352c813
SJ
1932 } else {
1933 data->bytes_xfered = data->blocks * data->blksz;
1934 data->error = 0;
1935 }
1936
1937 return data->error;
f95f3850
WN
1938}
1939
57e10486
AK
1940static void dw_mci_set_drto(struct dw_mci *host)
1941{
1942 unsigned int drto_clks;
9d9491a7 1943 unsigned int drto_div;
57e10486 1944 unsigned int drto_ms;
93c23ae3 1945 unsigned long irqflags;
57e10486
AK
1946
1947 drto_clks = mci_readl(host, TMOUT) >> 8;
9d9491a7
DA
1948 drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
1949 if (drto_div == 0)
1950 drto_div = 1;
1951 drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div,
1952 host->bus_hz);
57e10486
AK
1953
1954 /* add a bit of spare time */
1955 drto_ms += 10;
1956
93c23ae3
DA
1957 spin_lock_irqsave(&host->irq_lock, irqflags);
1958 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
1959 mod_timer(&host->dto_timer,
1960 jiffies + msecs_to_jiffies(drto_ms));
1961 spin_unlock_irqrestore(&host->irq_lock, irqflags);
57e10486
AK
1962}
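/*
 * Worked example with illustrative numbers (not from any particular
 * board): TMOUT[31:8] = 0xFFFFFF data-timeout clocks, CLKDIV = 0 (so
 * drto_div is forced to 1) and bus_hz = 200 MHz give
 * drto_ms = DIV_ROUND_UP(1000 * 16777215 * 1, 200000000) = 84 ms,
 * plus the 10 ms of slack above, so the timer fires roughly 94 ms out.
 */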
1963
8892b705
DA
1964static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
1965{
1966 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1967 return false;
1968
1969 /*
1970 * Really be certain that the timer has stopped. This is a bit of
1971 * paranoia and could only really happen if we had really bad
1972 * interrupt latency and the interrupt routine and timeout were
1973 * running concurrently so that the del_timer() in the interrupt
1974 * handler couldn't run.
1975 */
1976 WARN_ON(del_timer_sync(&host->cto_timer));
1977 clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1978
1979 return true;
1980}
1981
93c23ae3
DA
1982static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
1983{
1984 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
1985 return false;
1986
1987 /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
1988 WARN_ON(del_timer_sync(&host->dto_timer));
1989 clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1990
1991 return true;
1992}
1993
f95f3850
WN
1994static void dw_mci_tasklet_func(unsigned long priv)
1995{
1996 struct dw_mci *host = (struct dw_mci *)priv;
1997 struct mmc_data *data;
1998 struct mmc_command *cmd;
e352c813 1999 struct mmc_request *mrq;
f95f3850
WN
2000 enum dw_mci_state state;
2001 enum dw_mci_state prev_state;
e352c813 2002 unsigned int err;
f95f3850
WN
2003
2004 spin_lock(&host->lock);
2005
2006 state = host->state;
2007 data = host->data;
e352c813 2008 mrq = host->mrq;
f95f3850
WN
2009
2010 do {
2011 prev_state = state;
2012
2013 switch (state) {
2014 case STATE_IDLE:
01730558 2015 case STATE_WAITING_CMD11_DONE:
f95f3850
WN
2016 break;
2017
01730558 2018 case STATE_SENDING_CMD11:
f95f3850 2019 case STATE_SENDING_CMD:
8892b705 2020 if (!dw_mci_clear_pending_cmd_complete(host))
f95f3850
WN
2021 break;
2022
2023 cmd = host->cmd;
2024 host->cmd = NULL;
2025 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
e352c813
SJ
2026 err = dw_mci_command_complete(host, cmd);
2027 if (cmd == mrq->sbc && !err) {
053b3ce6 2028 prev_state = state = STATE_SENDING_CMD;
42f989c0 2029 __dw_mci_start_request(host, host->slot,
e352c813 2030 mrq->cmd);
053b3ce6
SJ
2031 goto unlock;
2032 }
2033
e352c813 2034 if (cmd->data && err) {
46d17952
DA
2035 /*
2036 * During UHS tuning sequence, sending the stop
2037 * command after the response CRC error would
2038 * throw the system into a confused state
2039 * causing all future tuning phases to report
2040 * failure.
2041 *
2042 * In such case controller will move into a data
2043 * transfer state after a response error or
2044 * response CRC error. Let's let that finish
2045 * before trying to send a stop, so we'll go to
2046 * STATE_SENDING_DATA.
2047 *
2048 * Although letting the data transfer take place
2049 * will waste a bit of time (we already know
2050 * the command was bad), it can't cause any
2051 * errors since it's possible it would have
2052 * taken place anyway if this tasklet got
2053 * delayed. Allowing the transfer to take place
2054 * avoids races and keeps things simple.
2055 */
2056 if ((err != -ETIMEDOUT) &&
2057 (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
2058 state = STATE_SENDING_DATA;
2059 continue;
2060 }
2061
71abb133 2062 dw_mci_stop_dma(host);
90c2143a
SJ
2063 send_stop_abort(host, data);
2064 state = STATE_SENDING_STOP;
2065 break;
71abb133
SJ
2066 }
2067
e352c813
SJ
2068 if (!cmd->data || err) {
2069 dw_mci_request_end(host, mrq);
f95f3850
WN
2070 goto unlock;
2071 }
2072
2073 prev_state = state = STATE_SENDING_DATA;
2074 /* fall through */
2075
2076 case STATE_SENDING_DATA:
2aa35465
DA
2077 /*
2078 * We could get a data error and never a transfer
2079 * complete so we'd better check for it here.
2080 *
2081 * Note that we don't really care if we also got a
2082 * transfer complete; stopping the DMA and sending an
2083 * abort won't hurt.
2084 */
f95f3850
WN
2085 if (test_and_clear_bit(EVENT_DATA_ERROR,
2086 &host->pending_events)) {
2087 dw_mci_stop_dma(host);
e13c3c08 2088 if (!(host->data_status & (SDMMC_INT_DRTO |
bdb9a90b 2089 SDMMC_INT_EBE)))
2090 send_stop_abort(host, data);
f95f3850
WN
2091 state = STATE_DATA_ERROR;
2092 break;
2093 }
2094
2095 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
57e10486
AK
2096 &host->pending_events)) {
2097 /*
2098 * If the data-related interrupts don't all arrive within
2099 * the given time in the reading-data state, arm the data
2100 * read timeout timer.
2100 */
16a34574 2101 if (host->dir_status == DW_MCI_RECV_STATUS)
57e10486 2102 dw_mci_set_drto(host);
f95f3850 2103 break;
57e10486 2104 }
f95f3850
WN
2105
2106 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2aa35465
DA
2107
2108 /*
2109 * Handle an EVENT_DATA_ERROR that might have shown up
2110 * before the transfer completed. This might not have
2111 * been caught by the check above because the interrupt
2112 * could have gone off between the previous check and
2113 * the check for transfer complete.
2114 *
2115 * Technically this ought not be needed assuming we
2116 * get a DATA_COMPLETE eventually (we'll notice the
2117 * error and end the request), but it shouldn't hurt.
2118 *
2119 * This has the advantage of sending the stop command.
2120 */
2121 if (test_and_clear_bit(EVENT_DATA_ERROR,
2122 &host->pending_events)) {
2123 dw_mci_stop_dma(host);
e13c3c08 2124 if (!(host->data_status & (SDMMC_INT_DRTO |
bdb9a90b 2125 SDMMC_INT_EBE)))
2126 send_stop_abort(host, data);
2aa35465
DA
2127 state = STATE_DATA_ERROR;
2128 break;
2129 }
f95f3850 2130 prev_state = state = STATE_DATA_BUSY;
2aa35465 2131
f95f3850
WN
2132 /* fall through */
2133
2134 case STATE_DATA_BUSY:
93c23ae3 2135 if (!dw_mci_clear_pending_data_complete(host)) {
57e10486
AK
2136 /*
2137 * If the data error interrupt has arrived but the data over
2138 * interrupt doesn't come within the given time in the
2139 * reading-data state, arm the data read timeout timer.
2140 */
16a34574 2141 if (host->dir_status == DW_MCI_RECV_STATUS)
57e10486 2142 dw_mci_set_drto(host);
f95f3850 2143 break;
57e10486 2144 }
f95f3850
WN
2145
2146 host->data = NULL;
2147 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
e352c813
SJ
2148 err = dw_mci_data_complete(host, data);
2149
2150 if (!err) {
2151 if (!data->stop || mrq->sbc) {
17c8bc85 2152 if (mrq->sbc && data->stop)
e352c813
SJ
2153 data->stop->error = 0;
2154 dw_mci_request_end(host, mrq);
2155 goto unlock;
f95f3850 2156 }
f95f3850 2157
e352c813
SJ
2158 /* stop command for open-ended transfer*/
2159 if (data->stop)
2160 send_stop_abort(host, data);
2aa35465
DA
2161 } else {
2162 /*
2163 * If we don't have a command complete now we'll
2164 * never get one since we just reset everything;
2165 * better end the request.
2166 *
2167 * If we do have a command complete we'll fall
2168 * through to the SENDING_STOP command and
2169 * everything will be peachy keen.
2170 */
2171 if (!test_bit(EVENT_CMD_COMPLETE,
2172 &host->pending_events)) {
2173 host->cmd = NULL;
2174 dw_mci_request_end(host, mrq);
2175 goto unlock;
2176 }
053b3ce6
SJ
2177 }
2178
e352c813
SJ
2179 /*
2180 * If err is non-zero, the stop/abort
2181 * command has already been issued.
2182 */
f95f3850 2183 prev_state = state = STATE_SENDING_STOP;
e352c813 2184
f95f3850
WN
2185 /* fall through */
2186
2187 case STATE_SENDING_STOP:
8892b705 2188 if (!dw_mci_clear_pending_cmd_complete(host))
f95f3850
WN
2189 break;
2190
71abb133 2191 /* CMD error in data command */
31bff450 2192 if (mrq->cmd->error && mrq->data)
3a33a94c 2193 dw_mci_reset(host);
71abb133 2194
f95f3850 2195 host->cmd = NULL;
71abb133 2196 host->data = NULL;
90c2143a 2197
e13c3c08 2198 if (!mrq->sbc && mrq->stop)
e352c813 2199 dw_mci_command_complete(host, mrq->stop);
90c2143a
SJ
2200 else
2201 host->cmd_status = 0;
2202
e352c813 2203 dw_mci_request_end(host, mrq);
f95f3850
WN
2204 goto unlock;
2205
2206 case STATE_DATA_ERROR:
2207 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2208 &host->pending_events))
2209 break;
2210
2211 state = STATE_DATA_BUSY;
2212 break;
2213 }
2214 } while (state != prev_state);
2215
2216 host->state = state;
2217unlock:
2218 spin_unlock(&host->lock);
2219
2220}
2221
34b664a2
JH
2222/* push final bytes to part_buf, only use during push */
2223static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 2224{
34b664a2
JH
2225 memcpy((void *)&host->part_buf, buf, cnt);
2226 host->part_buf_count = cnt;
2227}
f95f3850 2228
34b664a2
JH
2229/* append bytes to part_buf, only use during push */
2230static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2231{
2232 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2233 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2234 host->part_buf_count += cnt;
2235 return cnt;
2236}
f95f3850 2237
34b664a2
JH
2238/* pull first bytes from part_buf, only use during pull */
2239static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2240{
0e3a22c0 2241 cnt = min_t(int, cnt, host->part_buf_count);
34b664a2
JH
2242 if (cnt) {
2243 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2244 cnt);
2245 host->part_buf_count -= cnt;
2246 host->part_buf_start += cnt;
f95f3850 2247 }
34b664a2 2248 return cnt;
f95f3850
WN
2249}
2250
34b664a2
JH
2251/* pull final bytes from the part_buf, assuming it's just been filled */
2252static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 2253{
34b664a2
JH
2254 memcpy(buf, &host->part_buf, cnt);
2255 host->part_buf_start = cnt;
2256 host->part_buf_count = (1 << host->data_shift) - cnt;
2257}
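/*
 * Example of the part_buf staging (hypothetical sizes): with a 16-bit
 * FIFO (data_shift = 1), pushing a 7-byte scatterlist entry writes
 * three full 16-bit words and parks the odd trailing byte in part_buf
 * via dw_mci_set_part_bytes(); the next push call, or the final-length
 * check in dw_mci_push_data16(), completes and flushes that word so no
 * bytes are lost at segment boundaries.
 */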
f95f3850 2258
34b664a2
JH
2259static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2260{
cfbeb59c
MC
2261 struct mmc_data *data = host->data;
2262 int init_cnt = cnt;
2263
34b664a2
JH
2264 /* try and push anything in the part_buf */
2265 if (unlikely(host->part_buf_count)) {
2266 int len = dw_mci_push_part_bytes(host, buf, cnt);
0e3a22c0 2267
34b664a2
JH
2268 buf += len;
2269 cnt -= len;
cfbeb59c 2270 if (host->part_buf_count == 2) {
76184ac1 2271 mci_fifo_writew(host->fifo_reg, host->part_buf16);
34b664a2
JH
2272 host->part_buf_count = 0;
2273 }
2274 }
2275#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2276 if (unlikely((unsigned long)buf & 0x1)) {
2277 while (cnt >= 2) {
2278 u16 aligned_buf[64];
2279 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2280 int items = len >> 1;
2281 int i;
2282 /* memcpy from input buffer into aligned buffer */
2283 memcpy(aligned_buf, buf, len);
2284 buf += len;
2285 cnt -= len;
2286 /* push data from aligned buffer into fifo */
2287 for (i = 0; i < items; ++i)
76184ac1 2288 mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
34b664a2
JH
2289 }
2290 } else
2291#endif
2292 {
2293 u16 *pdata = buf;
0e3a22c0 2294
34b664a2 2295 for (; cnt >= 2; cnt -= 2)
76184ac1 2296 mci_fifo_writew(host->fifo_reg, *pdata++);
34b664a2
JH
2297 buf = pdata;
2298 }
2299 /* put anything remaining in the part_buf */
2300 if (cnt) {
2301 dw_mci_set_part_bytes(host, buf, cnt);
cfbeb59c
MC
2302 /* Push data if we have reached the expected data length */
2303 if ((data->bytes_xfered + init_cnt) ==
2304 (data->blksz * data->blocks))
76184ac1 2305 mci_fifo_writew(host->fifo_reg, host->part_buf16);
34b664a2
JH
2306 }
2307}
f95f3850 2308
34b664a2
JH
2309static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2310{
2311#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2312 if (unlikely((unsigned long)buf & 0x1)) {
2313 while (cnt >= 2) {
2314 /* pull data from fifo into aligned buffer */
2315 u16 aligned_buf[64];
2316 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2317 int items = len >> 1;
2318 int i;
0e3a22c0 2319
34b664a2 2320 for (i = 0; i < items; ++i)
76184ac1 2321 aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
34b664a2
JH
2322 /* memcpy from aligned buffer into output buffer */
2323 memcpy(buf, aligned_buf, len);
2324 buf += len;
2325 cnt -= len;
2326 }
2327 } else
2328#endif
2329 {
2330 u16 *pdata = buf;
0e3a22c0 2331
34b664a2 2332 for (; cnt >= 2; cnt -= 2)
76184ac1 2333 *pdata++ = mci_fifo_readw(host->fifo_reg);
34b664a2
JH
2334 buf = pdata;
2335 }
2336 if (cnt) {
76184ac1 2337 host->part_buf16 = mci_fifo_readw(host->fifo_reg);
34b664a2 2338 dw_mci_pull_final_bytes(host, buf, cnt);
f95f3850
WN
2339 }
2340}
2341
2342static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2343{
cfbeb59c
MC
2344 struct mmc_data *data = host->data;
2345 int init_cnt = cnt;
2346
34b664a2
JH
2347 /* try and push anything in the part_buf */
2348 if (unlikely(host->part_buf_count)) {
2349 int len = dw_mci_push_part_bytes(host, buf, cnt);
0e3a22c0 2350
34b664a2
JH
2351 buf += len;
2352 cnt -= len;
cfbeb59c 2353 if (host->part_buf_count == 4) {
76184ac1 2354 mci_fifo_writel(host->fifo_reg, host->part_buf32);
34b664a2
JH
2355 host->part_buf_count = 0;
2356 }
2357 }
2358#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2359 if (unlikely((unsigned long)buf & 0x3)) {
2360 while (cnt >= 4) {
2361 u32 aligned_buf[32];
2362 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2363 int items = len >> 2;
2364 int i;
2365 /* memcpy from input buffer into aligned buffer */
2366 memcpy(aligned_buf, buf, len);
2367 buf += len;
2368 cnt -= len;
2369 /* push data from aligned buffer into fifo */
2370 for (i = 0; i < items; ++i)
76184ac1 2371 mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
34b664a2
JH
2372 }
2373 } else
2374#endif
2375 {
2376 u32 *pdata = buf;
0e3a22c0 2377
34b664a2 2378 for (; cnt >= 4; cnt -= 4)
76184ac1 2379 mci_fifo_writel(host->fifo_reg, *pdata++);
34b664a2
JH
2380 buf = pdata;
2381 }
2382 /* put anything remaining in the part_buf */
2383 if (cnt) {
2384 dw_mci_set_part_bytes(host, buf, cnt);
cfbeb59c
MC
2385 /* Push data if we have reached the expected data length */
2386 if ((data->bytes_xfered + init_cnt) ==
2387 (data->blksz * data->blocks))
76184ac1 2388 mci_fifo_writel(host->fifo_reg, host->part_buf32);
f95f3850
WN
2389 }
2390}
2391
2392static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2393{
34b664a2
JH
2394#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2395 if (unlikely((unsigned long)buf & 0x3)) {
2396 while (cnt >= 4) {
2397 /* pull data from fifo into aligned buffer */
2398 u32 aligned_buf[32];
2399 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2400 int items = len >> 2;
2401 int i;
0e3a22c0 2402
34b664a2 2403 for (i = 0; i < items; ++i)
76184ac1 2404 aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
34b664a2
JH
2405 /* memcpy from aligned buffer into output buffer */
2406 memcpy(buf, aligned_buf, len);
2407 buf += len;
2408 cnt -= len;
2409 }
2410 } else
2411#endif
2412 {
2413 u32 *pdata = buf;
0e3a22c0 2414
34b664a2 2415 for (; cnt >= 4; cnt -= 4)
76184ac1 2416 *pdata++ = mci_fifo_readl(host->fifo_reg);
34b664a2
JH
2417 buf = pdata;
2418 }
2419 if (cnt) {
76184ac1 2420 host->part_buf32 = mci_fifo_readl(host->fifo_reg);
34b664a2 2421 dw_mci_pull_final_bytes(host, buf, cnt);
f95f3850
WN
2422 }
2423}
2424
2425static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2426{
cfbeb59c
MC
2427 struct mmc_data *data = host->data;
2428 int init_cnt = cnt;
2429
34b664a2
JH
2430 /* try and push anything in the part_buf */
2431 if (unlikely(host->part_buf_count)) {
2432 int len = dw_mci_push_part_bytes(host, buf, cnt);
0e3a22c0 2433
34b664a2
JH
2434 buf += len;
2435 cnt -= len;
c09fbd74 2436
cfbeb59c 2437 if (host->part_buf_count == 8) {
76184ac1 2438 mci_fifo_writeq(host->fifo_reg, host->part_buf);
34b664a2
JH
2439 host->part_buf_count = 0;
2440 }
2441 }
2442#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2443 if (unlikely((unsigned long)buf & 0x7)) {
2444 while (cnt >= 8) {
2445 u64 aligned_buf[16];
2446 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2447 int items = len >> 3;
2448 int i;
2449 /* memcpy from input buffer into aligned buffer */
2450 memcpy(aligned_buf, buf, len);
2451 buf += len;
2452 cnt -= len;
2453 /* push data from aligned buffer into fifo */
2454 for (i = 0; i < items; ++i)
76184ac1 2455 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
34b664a2
JH
2456 }
2457 } else
2458#endif
2459 {
2460 u64 *pdata = buf;
0e3a22c0 2461
34b664a2 2462 for (; cnt >= 8; cnt -= 8)
76184ac1 2463 mci_fifo_writeq(host->fifo_reg, *pdata++);
34b664a2
JH
2464 buf = pdata;
2465 }
2466 /* put anything remaining in the part_buf */
2467 if (cnt) {
2468 dw_mci_set_part_bytes(host, buf, cnt);
cfbeb59c
MC
2469 /* Push data if we have reached the expected data length */
2470 if ((data->bytes_xfered + init_cnt) ==
2471 (data->blksz * data->blocks))
76184ac1 2472 mci_fifo_writeq(host->fifo_reg, host->part_buf);
f95f3850
WN
2473 }
2474}
2475
2476static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2477{
34b664a2
JH
2478#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2479 if (unlikely((unsigned long)buf & 0x7)) {
2480 while (cnt >= 8) {
2481 /* pull data from fifo into aligned buffer */
2482 u64 aligned_buf[16];
2483 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2484 int items = len >> 3;
2485 int i;
0e3a22c0 2486
34b664a2 2487 for (i = 0; i < items; ++i)
76184ac1
BD
2488 aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2489
34b664a2
JH
2490 /* memcpy from aligned buffer into output buffer */
2491 memcpy(buf, aligned_buf, len);
2492 buf += len;
2493 cnt -= len;
2494 }
2495 } else
2496#endif
2497 {
2498 u64 *pdata = buf;
0e3a22c0 2499
34b664a2 2500 for (; cnt >= 8; cnt -= 8)
76184ac1 2501 *pdata++ = mci_fifo_readq(host->fifo_reg);
34b664a2
JH
2502 buf = pdata;
2503 }
2504 if (cnt) {
76184ac1 2505 host->part_buf = mci_fifo_readq(host->fifo_reg);
34b664a2
JH
2506 dw_mci_pull_final_bytes(host, buf, cnt);
2507 }
2508}
f95f3850 2509
34b664a2
JH
2510static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2511{
2512 int len;
f95f3850 2513
34b664a2
JH
2514 /* get remaining partial bytes */
2515 len = dw_mci_pull_part_bytes(host, buf, cnt);
2516 if (unlikely(len == cnt))
2517 return;
2518 buf += len;
2519 cnt -= len;
2520
2521 /* get the rest of the data */
2522 host->pull_data(host, buf, cnt);
f95f3850
WN
2523}
2524
87a74d39 2525static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
f95f3850 2526{
f9c2a0dc
SJ
2527 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2528 void *buf;
2529 unsigned int offset;
f95f3850
WN
2530 struct mmc_data *data = host->data;
2531 int shift = host->data_shift;
2532 u32 status;
3e4b0d8b 2533 unsigned int len;
f9c2a0dc 2534 unsigned int remain, fcnt;
f95f3850
WN
2535
2536 do {
f9c2a0dc
SJ
2537 if (!sg_miter_next(sg_miter))
2538 goto done;
2539
4225fc85 2540 host->sg = sg_miter->piter.sg;
f9c2a0dc
SJ
2541 buf = sg_miter->addr;
2542 remain = sg_miter->length;
2543 offset = 0;
2544
2545 do {
2546 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2547 << shift) + host->part_buf_count;
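 /*
 * fcnt is the number of bytes currently readable: e.g. (illustrative)
 * 16 FIFO entries at 32-bit data width (shift = 2) plus 2 bytes
 * staged in part_buf gives 16 * 4 + 2 = 66 bytes.
 */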
2548 len = min(remain, fcnt);
2549 if (!len)
2550 break;
34b664a2 2551 dw_mci_pull_data(host, (void *)(buf + offset), len);
3e4b0d8b 2552 data->bytes_xfered += len;
f95f3850 2553 offset += len;
f9c2a0dc
SJ
2554 remain -= len;
2555 } while (remain);
f95f3850 2556
e74f3a9c 2557 sg_miter->consumed = offset;
f95f3850
WN
2558 status = mci_readl(host, MINTSTS);
2559 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
87a74d39
KK
2560 /* if the RXDR is ready read again */
2561 } while ((status & SDMMC_INT_RXDR) ||
2562 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
f9c2a0dc
SJ
2563
2564 if (!remain) {
2565 if (!sg_miter_next(sg_miter))
2566 goto done;
2567 sg_miter->consumed = 0;
2568 }
2569 sg_miter_stop(sg_miter);
f95f3850
WN
2570 return;
2571
2572done:
f9c2a0dc
SJ
2573 sg_miter_stop(sg_miter);
2574 host->sg = NULL;
0e3a22c0 2575 smp_wmb(); /* drain writebuffer */
f95f3850
WN
2576 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2577}
2578
2579static void dw_mci_write_data_pio(struct dw_mci *host)
2580{
f9c2a0dc
SJ
2581 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2582 void *buf;
2583 unsigned int offset;
f95f3850
WN
2584 struct mmc_data *data = host->data;
2585 int shift = host->data_shift;
2586 u32 status;
3e4b0d8b 2587 unsigned int len;
f9c2a0dc
SJ
2588 unsigned int fifo_depth = host->fifo_depth;
2589 unsigned int remain, fcnt;
f95f3850
WN
2590
2591 do {
f9c2a0dc
SJ
2592 if (!sg_miter_next(sg_miter))
2593 goto done;
2594
4225fc85 2595 host->sg = sg_miter->piter.sg;
f9c2a0dc
SJ
2596 buf = sg_miter->addr;
2597 remain = sg_miter->length;
2598 offset = 0;
2599
2600 do {
2601 fcnt = ((fifo_depth -
2602 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2603 << shift) - host->part_buf_count;
2604 len = min(remain, fcnt);
2605 if (!len)
2606 break;
f95f3850 2607 host->push_data(host, (void *)(buf + offset), len);
3e4b0d8b 2608 data->bytes_xfered += len;
f95f3850 2609 offset += len;
f9c2a0dc
SJ
2610 remain -= len;
2611 } while (remain);
f95f3850 2612
e74f3a9c 2613 sg_miter->consumed = offset;
f95f3850
WN
2614 status = mci_readl(host, MINTSTS);
2615 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
f95f3850 2616 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
f9c2a0dc
SJ
2617
2618 if (!remain) {
2619 if (!sg_miter_next(sg_miter))
2620 goto done;
2621 sg_miter->consumed = 0;
2622 }
2623 sg_miter_stop(sg_miter);
f95f3850
WN
2624 return;
2625
2626done:
f9c2a0dc
SJ
2627 sg_miter_stop(sg_miter);
2628 host->sg = NULL;
0e3a22c0 2629 smp_wmb(); /* drain writebuffer */
f95f3850
WN
2630 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2631}
2632
2633static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2634{
0363b12d
DA
2635 del_timer(&host->cto_timer);
2636
f95f3850
WN
2637 if (!host->cmd_status)
2638 host->cmd_status = status;
2639
0e3a22c0 2640 smp_wmb(); /* drain writebuffer */
f95f3850
WN
2641
2642 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2643 tasklet_schedule(&host->tasklet);
2644}
2645
6130e7a9
DA
2646static void dw_mci_handle_cd(struct dw_mci *host)
2647{
b23475fa 2648 struct dw_mci_slot *slot = host->slot;
6130e7a9 2649
58870241
JC
2650 if (slot->mmc->ops->card_event)
2651 slot->mmc->ops->card_event(slot->mmc);
2652 mmc_detect_change(slot->mmc,
2653 msecs_to_jiffies(host->pdata->detect_delay_ms));
6130e7a9
DA
2654}
2655
f95f3850
WN
2656static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2657{
2658 struct dw_mci *host = dev_id;
182c9081 2659 u32 pending;
b23475fa 2660 struct dw_mci_slot *slot = host->slot;
8892b705 2661 unsigned long irqflags;
f95f3850 2662
1fb5f68a
MC
2663 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2664
476d79f1 2665 if (pending) {
01730558
DA
2666 /* Check volt switch first, since it can look like an error */
2667 if ((host->state == STATE_SENDING_CMD11) &&
2668 (pending & SDMMC_INT_VOLT_SWITCH)) {
2669 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2670 pending &= ~SDMMC_INT_VOLT_SWITCH;
49ba0302
DA
2671
2672 /*
2673 * Hold the lock; we know cmd11_timer can't be kicked
2674 * off after the lock is released, so safe to delete.
2675 */
2676 spin_lock_irqsave(&host->irq_lock, irqflags);
01730558 2677 dw_mci_cmd_interrupt(host, pending);
49ba0302
DA
2678 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2679
2680 del_timer(&host->cmd11_timer);
01730558
DA
2681 }
2682
f95f3850 2683 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
8892b705
DA
2684 spin_lock_irqsave(&host->irq_lock, irqflags);
2685
03de1921 2686 del_timer(&host->cto_timer);
f95f3850 2687 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
182c9081 2688 host->cmd_status = pending;
0e3a22c0 2689 smp_wmb(); /* drain writebuffer */
f95f3850 2690 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
8892b705
DA
2691
2692 spin_unlock_irqrestore(&host->irq_lock, irqflags);
f95f3850
WN
2693 }
2694
2695 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2696 /* if there is an error report DATA_ERROR */
2697 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
182c9081 2698 host->data_status = pending;
0e3a22c0 2699 smp_wmb(); /* drain writebuffer */
f95f3850 2700 set_bit(EVENT_DATA_ERROR, &host->pending_events);
9b2026a1 2701 tasklet_schedule(&host->tasklet);
f95f3850
WN
2702 }
2703
2704 if (pending & SDMMC_INT_DATA_OVER) {
93c23ae3
DA
2705 spin_lock_irqsave(&host->irq_lock, irqflags);
2706
16a34574 2707 del_timer(&host->dto_timer);
57e10486 2708
f95f3850
WN
2709 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2710 if (!host->data_status)
182c9081 2711 host->data_status = pending;
0e3a22c0 2712 smp_wmb(); /* drain writebuffer */
f95f3850
WN
2713 if (host->dir_status == DW_MCI_RECV_STATUS) {
2714 if (host->sg != NULL)
87a74d39 2715 dw_mci_read_data_pio(host, true);
f95f3850
WN
2716 }
2717 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2718 tasklet_schedule(&host->tasklet);
93c23ae3
DA
2719
2720 spin_unlock_irqrestore(&host->irq_lock, irqflags);
f95f3850
WN
2721 }
2722
2723 if (pending & SDMMC_INT_RXDR) {
2724 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
b40af3aa 2725 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
87a74d39 2726 dw_mci_read_data_pio(host, false);
f95f3850
WN
2727 }
2728
2729 if (pending & SDMMC_INT_TXDR) {
2730 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
b40af3aa 2731 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
f95f3850
WN
2732 dw_mci_write_data_pio(host);
2733 }
2734
2735 if (pending & SDMMC_INT_CMD_DONE) {
8892b705
DA
2736 spin_lock_irqsave(&host->irq_lock, irqflags);
2737
f95f3850 2738 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
182c9081 2739 dw_mci_cmd_interrupt(host, pending);
8892b705
DA
2740
2741 spin_unlock_irqrestore(&host->irq_lock, irqflags);
f95f3850
WN
2742 }
2743
2744 if (pending & SDMMC_INT_CD) {
2745 mci_writel(host, RINTSTS, SDMMC_INT_CD);
6130e7a9 2746 dw_mci_handle_cd(host);
f95f3850
WN
2747 }
2748
58870241
JC
2749 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2750 mci_writel(host, RINTSTS,
2751 SDMMC_INT_SDIO(slot->sdio_id));
2752 __dw_mci_enable_sdio_irq(slot, 0);
2753 sdio_signal_irq(slot->mmc);
1a5c8e1f
SH
2754 }
2755
1fb5f68a 2756 }
f95f3850 2757
3fc7eaef
SL
2758 if (host->use_dma != TRANS_MODE_IDMAC)
2759 return IRQ_HANDLED;
2760
2761 /* Handle IDMA interrupts */
69d99fdc
PT
2762 if (host->dma_64bit_address == 1) {
2763 pending = mci_readl(host, IDSTS64);
2764 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2765 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2766 SDMMC_IDMAC_INT_RI);
2767 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
faecf411
SL
2768 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2769 host->dma_ops->complete((void *)host);
69d99fdc
PT
2770 }
2771 } else {
2772 pending = mci_readl(host, IDSTS);
2773 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2774 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2775 SDMMC_IDMAC_INT_RI);
2776 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
faecf411
SL
2777 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2778 host->dma_ops->complete((void *)host);
69d99fdc 2779 }
f95f3850 2780 }
f95f3850
WN
2781
2782 return IRQ_HANDLED;
2783}
2784
3a43bbee
SL
2785static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2786{
2787 struct dw_mci *host = slot->host;
2788 const struct dw_mci_drv_data *drv_data = host->drv_data;
2789 struct mmc_host *mmc = slot->mmc;
2790 int ctrl_id;
2791
2792 if (host->pdata->caps)
2793 mmc->caps = host->pdata->caps;
2794
2795 /*
2796 * Support MMC_CAP_ERASE by default.
2797 * It needs to use trim/discard/erase commands.
2798 */
2799 mmc->caps |= MMC_CAP_ERASE;
2800
2801 if (host->pdata->pm_caps)
2802 mmc->pm_caps = host->pdata->pm_caps;
2803
2804 if (host->dev->of_node) {
2805 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2806 if (ctrl_id < 0)
2807 ctrl_id = 0;
2808 } else {
2809 ctrl_id = to_platform_device(host->dev)->id;
2810 }
358234b6
SL
2811
2812 if (drv_data && drv_data->caps) {
2813 if (ctrl_id >= drv_data->num_caps) {
2814 dev_err(host->dev, "invalid controller id %d\n",
2815 ctrl_id);
2816 return -EINVAL;
2817 }
3a43bbee 2818 mmc->caps |= drv_data->caps[ctrl_id];
358234b6 2819 }
3a43bbee
SL
2820
2821 if (host->pdata->caps2)
2822 mmc->caps2 = host->pdata->caps2;
2823
2824 /* Process SDIO IRQs through the sdio_irq_work. */
2825 if (mmc->caps & MMC_CAP_SDIO_IRQ)
2826 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2827
2828 return 0;
2829}
2830
e4a65ef7 2831static int dw_mci_init_slot(struct dw_mci *host)
f95f3850
WN
2832{
2833 struct mmc_host *mmc;
2834 struct dw_mci_slot *slot;
3a43bbee 2835 int ret;
1f44a2a5 2836 u32 freq[2];
f95f3850 2837
4a90920c 2838 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
f95f3850
WN
2839 if (!mmc)
2840 return -ENOMEM;
2841
2842 slot = mmc_priv(mmc);
e4a65ef7
JC
2843 slot->id = 0;
2844 slot->sdio_id = host->sdio_id0 + slot->id;
f95f3850
WN
2845 slot->mmc = mmc;
2846 slot->host = host;
b23475fa 2847 host->slot = slot;
f95f3850
WN
2848
2849 mmc->ops = &dw_mci_ops;
852ff5fe
DW
2850 if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
2851 freq, 2)) {
1f44a2a5
SJ
2852 mmc->f_min = DW_MCI_FREQ_MIN;
2853 mmc->f_max = DW_MCI_FREQ_MAX;
2854 } else {
b023030f
JC
2855 dev_info(host->dev,
2856 "'clock-freq-min-max' property was deprecated.\n");
1f44a2a5
SJ
2857 mmc->f_min = freq[0];
2858 mmc->f_max = freq[1];
2859 }
f95f3850 2860
51da2240
YC
2861 /* if there are external regulators, get them */
2862 ret = mmc_regulator_get_supply(mmc);
0f3a47b8 2863 if (ret)
3cf890fc 2864 goto err_host_allocated;
51da2240
YC
2865
2866 if (!mmc->ocr_avail)
2867 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
f95f3850 2868
3cf890fc
DA
2869 ret = mmc_of_parse(mmc);
2870 if (ret)
2871 goto err_host_allocated;
f95f3850 2872
3a43bbee
SL
2873 ret = dw_mci_init_slot_caps(slot);
2874 if (ret)
2875 goto err_host_allocated;
32dba737 2876
2b708df2 2877 /* Useful defaults if platform data is unset. */
3fc7eaef 2878 if (host->use_dma == TRANS_MODE_IDMAC) {
2b708df2 2879 mmc->max_segs = host->ring_size;
225faf87 2880 mmc->max_blk_size = 65535;
2b708df2
JC
2881 mmc->max_seg_size = 0x1000;
2882 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2883 mmc->max_blk_count = mmc->max_req_size / 512;
3fc7eaef
SL
2884 } else if (host->use_dma == TRANS_MODE_EDMAC) {
2885 mmc->max_segs = 64;
225faf87 2886 mmc->max_blk_size = 65535;
3fc7eaef
SL
2887 mmc->max_blk_count = 65535;
2888 mmc->max_req_size =
2889 mmc->max_blk_size * mmc->max_blk_count;
2890 mmc->max_seg_size = mmc->max_req_size;
f95f3850 2891 } else {
3fc7eaef 2892 /* TRANS_MODE_PIO */
2b708df2 2893 mmc->max_segs = 64;
225faf87 2894 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2b708df2
JC
2895 mmc->max_blk_count = 512;
2896 mmc->max_req_size = mmc->max_blk_size *
2897 mmc->max_blk_count;
2898 mmc->max_seg_size = mmc->max_req_size;
a39e5746 2899 }
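/*
 * Sizing example for the IDMAC branch above: ring_size comes from the
 * DMA init code and depends on the descriptor ring allocation, so
 * assuming 128 descriptors purely for illustration, max_seg_size is
 * 4 KiB, max_req_size = 4 KiB * 128 = 512 KiB and max_blk_count =
 * 512 KiB / 512 = 1024 blocks per request.
 */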
f95f3850 2900
c0834a58 2901 dw_mci_get_cd(mmc);
ae0eb348 2902
0cea529d
JC
2903 ret = mmc_add_host(mmc);
2904 if (ret)
3cf890fc 2905 goto err_host_allocated;
f95f3850
WN
2906
2907#if defined(CONFIG_DEBUG_FS)
2908 dw_mci_init_debugfs(slot);
2909#endif
2910
f95f3850 2911 return 0;
800d78bf 2912
3cf890fc 2913err_host_allocated:
800d78bf 2914 mmc_free_host(mmc);
51da2240 2915 return ret;
f95f3850
WN
2916}
2917
e4a65ef7 2918static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
f95f3850 2919{
f95f3850
WN
2920 /* Debugfs stuff is cleaned up by mmc core */
2921 mmc_remove_host(slot->mmc);
b23475fa 2922 slot->host->slot = NULL;
f95f3850
WN
2923 mmc_free_host(slot->mmc);
2924}
2925
2926static void dw_mci_init_dma(struct dw_mci *host)
2927{
69d99fdc 2928 int addr_config;
3fc7eaef 2929 struct device *dev = host->dev;
69d99fdc 2930
3fc7eaef
SL
2931 /*
2932 * Check transfer mode from HCON[17:16]
2933 * The dw_mmc databook's description here is ambiguous; in practice:
2934 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
2935 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
2936 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
2937 * 2b'11: Non DW DMA Interface -> pio only
2938 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
2939 * simpler request/acknowledge handshake mechanism and both of them
2940 * are regarded as external dma master for dw_mmc.
2941 */
2942 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
2943 if (host->use_dma == DMA_INTERFACE_IDMA) {
2944 host->use_dma = TRANS_MODE_IDMAC;
2945 } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
2946 host->use_dma == DMA_INTERFACE_GDMA) {
2947 host->use_dma = TRANS_MODE_EDMAC;
2948 } else {
f95f3850
WN
2949 goto no_dma;
2950 }
2951
2952 /* Determine which DMA interface to use */
3fc7eaef
SL
2953 if (host->use_dma == TRANS_MODE_IDMAC) {
2954 /*
2955 * Check ADDR_CONFIG bit in HCON to find
2956 * IDMAC address bus width
2957 */
70692752 2958 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
3fc7eaef
SL
2959
2960 if (addr_config == 1) {
2961 /* host supports IDMAC in 64-bit address mode */
2962 host->dma_64bit_address = 1;
2963 dev_info(host->dev,
2964 "IDMAC supports 64-bit address mode.\n");
2965 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2966 dma_set_coherent_mask(host->dev,
2967 DMA_BIT_MASK(64));
2968 } else {
2969 /* host supports IDMAC in 32-bit address mode */
2970 host->dma_64bit_address = 0;
2971 dev_info(host->dev,
2972 "IDMAC supports 32-bit address mode.\n");
2973 }
f95f3850 2974
3fc7eaef 2975 /* Alloc memory for sg translation */
cc190d4c
SL
2976 host->sg_cpu = dmam_alloc_coherent(host->dev,
2977 DESC_RING_BUF_SZ,
3fc7eaef
SL
2978 &host->sg_dma, GFP_KERNEL);
2979 if (!host->sg_cpu) {
2980 dev_err(host->dev,
2981 "%s: could not alloc DMA memory\n",
2982 __func__);
2983 goto no_dma;
2984 }
2985
2986 host->dma_ops = &dw_mci_idmac_ops;
2987 dev_info(host->dev, "Using internal DMA controller.\n");
2988 } else {
2989 /* TRANS_MODE_EDMAC: check dma bindings again */
852ff5fe
DW
2990 if ((device_property_read_string_array(dev, "dma-names",
2991 NULL, 0) < 0) ||
2992 !device_property_present(dev, "dmas")) {
3fc7eaef
SL
2993 goto no_dma;
2994 }
2995 host->dma_ops = &dw_mci_edmac_ops;
2996 dev_info(host->dev, "Using external DMA controller.\n");
2997 }
f95f3850 2998
e1631f98
JC
2999 if (host->dma_ops->init && host->dma_ops->start &&
3000 host->dma_ops->stop && host->dma_ops->cleanup) {
f95f3850 3001 if (host->dma_ops->init(host)) {
0e3a22c0
SL
3002 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
3003 __func__);
f95f3850
WN
3004 goto no_dma;
3005 }
3006 } else {
4a90920c 3007 dev_err(host->dev, "DMA initialization not found.\n");
f95f3850
WN
3008 goto no_dma;
3009 }
3010
f95f3850
WN
3011 return;
3012
3013no_dma:
4a90920c 3014 dev_info(host->dev, "Using PIO mode.\n");
3fc7eaef 3015 host->use_dma = TRANS_MODE_PIO;
f95f3850
WN
3016}
3017
37977729 3018static void dw_mci_cmd11_timer(struct timer_list *t)
5c935165 3019{
37977729 3020 struct dw_mci *host = from_timer(host, t, cmd11_timer);
5c935165 3021
fd674198
DA
3022 if (host->state != STATE_SENDING_CMD11) {
3023 dev_warn(host->dev, "Unexpected CMD11 timeout\n");
3024 return;
3025 }
5c935165
DA
3026
3027 host->cmd_status = SDMMC_INT_RTO;
3028 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3029 tasklet_schedule(&host->tasklet);
3030}
3031
37977729 3032static void dw_mci_cto_timer(struct timer_list *t)
03de1921 3033{
37977729 3034 struct dw_mci *host = from_timer(host, t, cto_timer);
8892b705
DA
3035 unsigned long irqflags;
3036 u32 pending;
03de1921 3037
8892b705 3038 spin_lock_irqsave(&host->irq_lock, irqflags);
03de1921 3039
8892b705
DA
3040 /*
3041 * If somehow we have very bad interrupt latency it's remotely possible
3042 * that the timer could fire while the interrupt is still pending or
3043 * while the interrupt is midway through running. Let's be paranoid
3044 * and detect those two cases. Note that this paranoia is somewhat
3045 * justified because in this function we don't actually cancel the
3046 * pending command in the controller--we just assume it will never come.
3047 */
3048 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3049 if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
3050 /* The interrupt should fire; no need to act but we can warn */
3051 dev_warn(host->dev, "Unexpected interrupt latency\n");
3052 goto exit;
3053 }
3054 if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
3055 /* Presumably interrupt handler couldn't delete the timer */
3056 dev_warn(host->dev, "CTO timeout when already completed\n");
3057 goto exit;
3058 }
3059
3060 /*
3061 * Continued paranoia to make sure we're in the state we expect.
3062 * This paranoia isn't really justified but it seems good to be safe.
3063 */
03de1921
AK
3064 switch (host->state) {
3065 case STATE_SENDING_CMD11:
3066 case STATE_SENDING_CMD:
3067 case STATE_SENDING_STOP:
3068 /*
3069 * If CMD_DONE interrupt does NOT come in sending command
3070 * state, we should notify the driver to terminate current
3071 * transfer and report a command timeout to the core.
3072 */
3073 host->cmd_status = SDMMC_INT_RTO;
3074 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3075 tasklet_schedule(&host->tasklet);
3076 break;
3077 default:
3078 dev_warn(host->dev, "Unexpected command timeout, state %d\n",
3079 host->state);
3080 break;
3081 }
8892b705
DA
3082
3083exit:
3084 spin_unlock_irqrestore(&host->irq_lock, irqflags);
03de1921
AK
3085}
3086
37977729 3087static void dw_mci_dto_timer(struct timer_list *t)
57e10486 3088{
37977729 3089 struct dw_mci *host = from_timer(host, t, dto_timer);
93c23ae3
DA
3090 unsigned long irqflags;
3091 u32 pending;
3092
3093 spin_lock_irqsave(&host->irq_lock, irqflags);
57e10486 3094
93c23ae3
DA
3095 /*
3096 * The DTO timer is much longer than the CTO timer, so it's even less
3097 * likely that we'll hit these cases, but it pays to be paranoid.
3098 */
3099 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3100 if (pending & SDMMC_INT_DATA_OVER) {
3101 /* The interrupt should fire; no need to act but we can warn */
3102 dev_warn(host->dev, "Unexpected data interrupt latency\n");
3103 goto exit;
3104 }
3105 if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
3106 /* Presumably interrupt handler couldn't delete the timer */
3107 dev_warn(host->dev, "DTO timeout when already completed\n");
3108 goto exit;
3109 }
3110
3111 /*
3112 * Continued paranoia to make sure we're in the state we expect.
3113 * This paranoia isn't really justified but it seems good to be safe.
3114 */
57e10486
AK
3115 switch (host->state) {
3116 case STATE_SENDING_DATA:
3117 case STATE_DATA_BUSY:
3118 /*
3119 * If DTO interrupt does NOT come in sending data state,
3120 * we should notify the driver to terminate current transfer
3121 * and report a data timeout to the core.
3122 */
3123 host->data_status = SDMMC_INT_DRTO;
3124 set_bit(EVENT_DATA_ERROR, &host->pending_events);
3125 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3126 tasklet_schedule(&host->tasklet);
3127 break;
3128 default:
93c23ae3
DA
3129 dev_warn(host->dev, "Unexpected data timeout, state %d\n",
3130 host->state);
57e10486
AK
3131 break;
3132 }
93c23ae3
DA
3133
3134exit:
3135 spin_unlock_irqrestore(&host->irq_lock, irqflags);
57e10486
AK
3136}
3137
c91eab4b 3138#ifdef CONFIG_OF
c91eab4b
TA
3139static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3140{
3141 struct dw_mci_board *pdata;
3142 struct device *dev = host->dev;
e95baf13 3143 const struct dw_mci_drv_data *drv_data = host->drv_data;
e8cc37b8 3144 int ret;
3c6d89ea 3145 u32 clock_frequency;
c91eab4b
TA
3146
3147 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
bf3707ea 3148 if (!pdata)
c91eab4b 3149 return ERR_PTR(-ENOMEM);
c91eab4b 3150
d6786fef 3151 /* find the reset controller if one exists */
a93d6f31 3152 pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
d6786fef
GX
3153 if (IS_ERR(pdata->rstc)) {
3154 if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
3155 return ERR_PTR(-EPROBE_DEFER);
3156 }
3157
c91eab4b 3158 /* find out number of slots supported */
16f5df8b 3159 if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots))
d30a8f7b 3160 dev_info(dev, "'num-slots' was deprecated.\n");
c91eab4b 3161
852ff5fe 3162 if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
0e3a22c0
SL
3163 dev_info(dev,
3164 "fifo-depth property not found, using value of FIFOTH register as default\n");
c91eab4b 3165
852ff5fe
DW
3166 device_property_read_u32(dev, "card-detect-delay",
3167 &pdata->detect_delay_ms);
c91eab4b 3168
852ff5fe 3169 device_property_read_u32(dev, "data-addr", &host->data_addr_override);
a0361c1a 3170
852ff5fe 3171 if (device_property_present(dev, "fifo-watermark-aligned"))
d6fced83
JN
3172 host->wm_aligned = true;
3173
852ff5fe 3174 if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
3c6d89ea
DA
3175 pdata->bus_hz = clock_frequency;
3176
cb27a843
JH
3177 if (drv_data && drv_data->parse_dt) {
3178 ret = drv_data->parse_dt(host);
800d78bf
TA
3179 if (ret)
3180 return ERR_PTR(ret);
3181 }
3182
c91eab4b
TA
3183 return pdata;
3184}
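/*
 * Illustrative device-tree fragment (node name, addresses and reset
 * specifier are placeholders, not from any particular board) covering
 * the optional properties consumed above:
 *
 *	mmc@12345000 {
 *		compatible = "snps,dw-mshc";
 *		clock-frequency = <200000000>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		fifo-watermark-aligned;
 *		resets = <&rst 0>;
 *		reset-names = "reset";
 *	};
 */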
3185
3186#else /* CONFIG_OF */
3187static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3188{
3189 return ERR_PTR(-EINVAL);
3190}
3191#endif /* CONFIG_OF */
3192
fa0c3283
DA
3193static void dw_mci_enable_cd(struct dw_mci *host)
3194{
fa0c3283
DA
3195 unsigned long irqflags;
3196 u32 temp;
fa0c3283 3197
e8cc37b8
SL
3198 /*
3199 * No need for the CD interrupt if the slot polls for card detect
3200 * (broken card detection) or has a usable CD GPIO.
3201 */
e47c0b96 3202 if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
fa0c3283
DA
3203 return;
3204
e47c0b96 3205 if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
58870241
JC
3206 spin_lock_irqsave(&host->irq_lock, irqflags);
3207 temp = mci_readl(host, INTMASK);
3208 temp |= SDMMC_INT_CD;
3209 mci_writel(host, INTMASK, temp);
3210 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3211 }
fa0c3283
DA
3212}
3213
62ca8034 3214int dw_mci_probe(struct dw_mci *host)
f95f3850 3215{
e95baf13 3216 const struct dw_mci_drv_data *drv_data = host->drv_data;
62ca8034 3217 int width, i, ret = 0;
f95f3850
WN
3218 u32 fifo_size;
3219
c91eab4b
TA
3220 if (!host->pdata) {
3221 host->pdata = dw_mci_parse_dt(host);
d6786fef
GX
3222 if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
3223 return -EPROBE_DEFER;
3224 } else if (IS_ERR(host->pdata)) {
c91eab4b
TA
3225 dev_err(host->dev, "platform data not available\n");
3226 return -EINVAL;
3227 }
f95f3850
WN
3228 }
3229
780f22af 3230 host->biu_clk = devm_clk_get(host->dev, "biu");
f90a0612
TA
3231 if (IS_ERR(host->biu_clk)) {
3232 dev_dbg(host->dev, "biu clock not available\n");
3233 } else {
3234 ret = clk_prepare_enable(host->biu_clk);
3235 if (ret) {
3236 dev_err(host->dev, "failed to enable biu clock\n");
f90a0612
TA
3237 return ret;
3238 }
3239 }
3240
780f22af 3241 host->ciu_clk = devm_clk_get(host->dev, "ciu");
f90a0612
TA
3242 if (IS_ERR(host->ciu_clk)) {
3243 dev_dbg(host->dev, "ciu clock not available\n");
3c6d89ea 3244 host->bus_hz = host->pdata->bus_hz;
f90a0612
TA
3245 } else {
3246 ret = clk_prepare_enable(host->ciu_clk);
3247 if (ret) {
3248 dev_err(host->dev, "failed to enable ciu clock\n");
f90a0612
TA
3249 goto err_clk_biu;
3250 }
f90a0612 3251
3c6d89ea
DA
3252 if (host->pdata->bus_hz) {
3253 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3254 if (ret)
3255 dev_warn(host->dev,
612de4c1 3256 "Unable to set bus rate to %uHz\n",
3c6d89ea
DA
3257 host->pdata->bus_hz);
3258 }
f90a0612 3259 host->bus_hz = clk_get_rate(host->ciu_clk);
3c6d89ea 3260 }
f90a0612 3261
612de4c1
JC
3262 if (!host->bus_hz) {
3263 dev_err(host->dev,
3264 "Platform data must supply bus speed\n");
3265 ret = -ENODEV;
3266 goto err_clk_ciu;
3267 }
3268
941e372d 3269 if (!IS_ERR(host->pdata->rstc)) {
3270 reset_control_assert(host->pdata->rstc);
3271 usleep_range(10, 50);
3272 reset_control_deassert(host->pdata->rstc);
3273 }
3274
002f0d5c
YK
3275 if (drv_data && drv_data->init) {
3276 ret = drv_data->init(host);
3277 if (ret) {
3278 dev_err(host->dev,
3279 "implementation specific init failed\n");
3280 goto err_clk_ciu;
3281 }
3282 }
3283
37977729
KC
3284 timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
3285 timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
3286 timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
57e10486 3287
f95f3850 3288 spin_lock_init(&host->lock);
f8c58c11 3289 spin_lock_init(&host->irq_lock);
f95f3850
WN
3290 INIT_LIST_HEAD(&host->queue);
3291
f95f3850
WN
3292 /*
3293 * Get the host data width - this assumes that HCON has been set with
3294 * the correct values.
3295 */
70692752 3296 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
f95f3850
WN
3297 if (!i) {
3298 host->push_data = dw_mci_push_data16;
3299 host->pull_data = dw_mci_pull_data16;
3300 width = 16;
3301 host->data_shift = 1;
3302 } else if (i == 2) {
3303 host->push_data = dw_mci_push_data64;
3304 host->pull_data = dw_mci_pull_data64;
3305 width = 64;
3306 host->data_shift = 3;
3307 } else {
3308 /* Check for a reserved value, and warn if it is */
3309 WARN((i != 1),
3310 "HCON reports a reserved host data width!\n"
3311 "Defaulting to 32-bit access.\n");
3312 host->push_data = dw_mci_push_data32;
3313 host->pull_data = dw_mci_pull_data32;
3314 width = 32;
3315 host->data_shift = 2;
3316 }
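/*
 * For example, the common HCON HDATA_WIDTH encoding of 1 takes the
 * else branch above without warning: 32-bit push/pull helpers and
 * data_shift = 2, i.e. FIFO entry counts become byte counts via "<< 2".
 */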
3317
3318 /* Reset all blocks */
3744415c
SL
3319 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3320 ret = -ENODEV;
3321 goto err_clk_ciu;
3322 }
141a712a
SJ
3323
3324 host->dma_ops = host->pdata->dma_ops;
3325 dw_mci_init_dma(host);
f95f3850
WN
3326
3327 /* Clear the interrupts for the host controller */
3328 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3329 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3330
3331 /* Put in max timeout */
3332 mci_writel(host, TMOUT, 0xFFFFFFFF);
3333
3334 /*
3335 * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
3336 * TxMark = fifo_size / 2, DMA Size = 8
3337 */
b86d8253
JH
3338 if (!host->pdata->fifo_depth) {
3339 /*
3340 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3341 * have been overwritten by the bootloader, just like we're
3342 * about to do, so if you know the value for your hardware, you
3343 * should put it in the platform data.
3344 */
3345 fifo_size = mci_readl(host, FIFOTH);
8234e869 3346 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
b86d8253
JH
3347 } else {
3348 fifo_size = host->pdata->fifo_depth;
3349 }
3350 host->fifo_depth = fifo_size;
52426899
SJ
3351 host->fifoth_val =
3352 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
e61cf118 3353 mci_writel(host, FIFOTH, host->fifoth_val);
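/*
 * Worked example assuming a fifo_depth of 64 (a common value): the
 * FIFOTH write above programs MSIZE = 0x2 (multiple-transaction size
 * of 8, per the comment above), RX_WMark = 64/2 - 1 = 31 and
 * TX_WMark = 64/2 = 32, so requests fire when the FIFO is roughly
 * half full (RX) or half empty (TX).
 */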
f95f3850
WN
3354
3355 /* disable clock to CIU */
3356 mci_writel(host, CLKENA, 0);
3357 mci_writel(host, CLKSRC, 0);
3358
63008768
JH
3359 /*
3360 * The data register offset changed in the 2.40a spec, so check
3361 * the version-id and set the DATA register offset accordingly.
3362 */
3363 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3364 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3365
a0361c1a
JN
3366 if (host->data_addr_override)
3367 host->fifo_reg = host->regs + host->data_addr_override;
3368 else if (host->verid < DW_MMC_240A)
76184ac1 3369 host->fifo_reg = host->regs + DATA_OFFSET;
63008768 3370 else
76184ac1 3371 host->fifo_reg = host->regs + DATA_240A_OFFSET;
63008768 3372
f95f3850 3373 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
780f22af
SJ
3374 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3375 host->irq_flags, "dw-mci", host);
f95f3850 3376 if (ret)
6130e7a9 3377 goto err_dmaunmap;
f95f3850 3378
2da1d7f2 3379 /*
fa0c3283 3380 * Enable interrupts for command done, data over, data empty,
2da1d7f2
YC
3381 * receive ready, and errors such as transmit/receive timeouts and CRC errors
3382 */
2da1d7f2
YC
3383 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3384 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
fa0c3283 3385 DW_MCI_ERROR_FLAGS);
0e3a22c0
SL
3386 /* Enable mci interrupt */
3387 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2da1d7f2 3388
0e3a22c0
SL
3389 dev_info(host->dev,
3390 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
2da1d7f2
YC
3391 host->irq, width, fifo_size);
3392
f95f3850 3393 /* We need at least one slot to succeed */
e4a65ef7 3394 ret = dw_mci_init_slot(host);
58870241
JC
3395 if (ret) {
3396 dev_dbg(host->dev, "slot %d init failed\n", i);
6130e7a9 3397 goto err_dmaunmap;
f95f3850
WN
3398 }
3399
b793f658
DA
3400 /* Now that slots are all setup, we can enable card detect */
3401 dw_mci_enable_cd(host);
3402
f95f3850
WN
3403 return 0;
3404
f95f3850
WN
3405err_dmaunmap:
3406 if (host->use_dma && host->dma_ops->exit)
3407 host->dma_ops->exit(host);
f90a0612 3408
d6786fef
GX
3409 if (!IS_ERR(host->pdata->rstc))
3410 reset_control_assert(host->pdata->rstc);
3411
f90a0612 3412err_clk_ciu:
7037f3be 3413 clk_disable_unprepare(host->ciu_clk);
780f22af 3414
f90a0612 3415err_clk_biu:
7037f3be 3416 clk_disable_unprepare(host->biu_clk);
780f22af 3417
f95f3850
WN
3418 return ret;
3419}
62ca8034 3420EXPORT_SYMBOL(dw_mci_probe);
f95f3850 3421
62ca8034 3422void dw_mci_remove(struct dw_mci *host)
f95f3850 3423{
e4a65ef7 3424 dev_dbg(host->dev, "remove slot\n");
b23475fa 3425 if (host->slot)
e4a65ef7 3426 dw_mci_cleanup_slot(host->slot);
f95f3850 3427
048fd7e6
PT
3428 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3429 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3430
f95f3850
WN
3431 /* disable clock to CIU */
3432 mci_writel(host, CLKENA, 0);
3433 mci_writel(host, CLKSRC, 0);
3434
f95f3850
WN
3435 if (host->use_dma && host->dma_ops->exit)
3436 host->dma_ops->exit(host);
3437
d6786fef
GX
3438 if (!IS_ERR(host->pdata->rstc))
3439 reset_control_assert(host->pdata->rstc);
3440
7037f3be
JC
3441 clk_disable_unprepare(host->ciu_clk);
3442 clk_disable_unprepare(host->biu_clk);
f95f3850 3443}
62ca8034
SH
3444EXPORT_SYMBOL(dw_mci_remove);
3445
3446
f95f3850 3447
e9ed8835 3448#ifdef CONFIG_PM
ed24e1ff 3449int dw_mci_runtime_suspend(struct device *dev)
f95f3850 3450{
ed24e1ff
SL
3451 struct dw_mci *host = dev_get_drvdata(dev);
3452
3fc7eaef
SL
3453 if (host->use_dma && host->dma_ops->exit)
3454 host->dma_ops->exit(host);
3455
ed24e1ff
SL
3456 clk_disable_unprepare(host->ciu_clk);
3457
42f989c0
JC
3458 if (host->slot &&
3459 (mmc_can_gpio_cd(host->slot->mmc) ||
3460 !mmc_card_is_removable(host->slot->mmc)))
ed24e1ff
SL
3461 clk_disable_unprepare(host->biu_clk);
3462
f95f3850
WN
3463 return 0;
3464}
ed24e1ff 3465EXPORT_SYMBOL(dw_mci_runtime_suspend);
f95f3850 3466
ed24e1ff 3467int dw_mci_runtime_resume(struct device *dev)
f95f3850 3468{
b23475fa 3469 int ret = 0;
ed24e1ff 3470 struct dw_mci *host = dev_get_drvdata(dev);
f95f3850 3471
42f989c0
JC
3472 if (host->slot &&
3473 (mmc_can_gpio_cd(host->slot->mmc) ||
3474 !mmc_card_is_removable(host->slot->mmc))) {
ed24e1ff
SL
3475 ret = clk_prepare_enable(host->biu_clk);
3476 if (ret)
3477 return ret;
e61cf118
JC
3478 }
3479
ed24e1ff
SL
3480 ret = clk_prepare_enable(host->ciu_clk);
3481 if (ret)
df9bcc2b
JS
3482 goto err;
3483
3484 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3485 clk_disable_unprepare(host->ciu_clk);
3486 ret = -ENODEV;
3487 goto err;
3488 }
ed24e1ff 3489
3bfe619d 3490 if (host->use_dma && host->dma_ops->init)
141a712a
SJ
3491 host->dma_ops->init(host);
3492
52426899
SJ
3493 /*
3494 * Restore the initial value at FIFOTH register
3495 * And Invalidate the prev_blksz with zero
3496 */
ed24e1ff
SL
3497 mci_writel(host, FIFOTH, host->fifoth_val);
3498 host->prev_blksz = 0;
e61cf118 3499
2eb2944f
DA
3500 /* Put in max timeout */
3501 mci_writel(host, TMOUT, 0xFFFFFFFF);
3502
e61cf118
JC
3503 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3504 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3505 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
fa0c3283 3506 DW_MCI_ERROR_FLAGS);
e61cf118
JC
3507 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3508
0e3a22c0 3509
e47c0b96
JC
3510 if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3511 dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
e9748e03 3512
58870241 3513 /* Force setup bus to guarantee available clock output */
e47c0b96 3514 dw_mci_setup_bus(host->slot, true);
fa0c3283
DA
3515
3516 /* Now that slots are all setup, we can enable card detect */
3517 dw_mci_enable_cd(host);
3518
df9bcc2b
JS
3519 return 0;
3520
3521err:
42f989c0
JC
3522 if (host->slot &&
3523 (mmc_can_gpio_cd(host->slot->mmc) ||
3524 !mmc_card_is_removable(host->slot->mmc)))
df9bcc2b
JS
3525 clk_disable_unprepare(host->biu_clk);
3526
ed24e1ff 3527 return ret;
e9ed8835
SL
3528}
3529EXPORT_SYMBOL(dw_mci_runtime_resume);
3530#endif /* CONFIG_PM */
6fe8890d 3531
f95f3850
WN
3532static int __init dw_mci_init(void)
3533{
8e1c4e4d 3534 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
62ca8034 3535 return 0;
f95f3850
WN
3536}
3537
3538static void __exit dw_mci_exit(void)
3539{
f95f3850
WN
3540}
3541
3542module_init(dw_mci_init);
3543module_exit(dw_mci_exit);
3544
3545MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3546MODULE_AUTHOR("NXP Semiconductor VietNam");
3547MODULE_AUTHOR("Imagination Technologies Ltd");
3548MODULE_LICENSE("GPL v2");