/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

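/*
 * Set the given reset bit(s) in CTRL and poll until the controller has
 * cleared them again.  Returns true on success, false if the bits did
 * not clear within the 500ms timeout.
 */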
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy.  Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      10, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}

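/*
 * Issue a CIU-only command (e.g. a clock update): write the argument,
 * wait out any in-progress data transfer, then set SDMMC_CMD_START and
 * poll until the CIU clears it to signal the command was accepted.
 */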
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}

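/*
 * Translate an mmc_command into CMD register flags: stop/abort and
 * wait-for-data handling, response length/CRC expectations, data
 * direction, and the special CMD11 voltage-switch sequencing.
 */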
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

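/*
 * Pre-build the stop/abort command that matches this data command
 * (CMD12 for block transfers, an SDIO abort via CMD52 for
 * SD_IO_RW_EXTENDED), so it can be sent immediately if the transfer
 * errors out later.
 */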
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL.  No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

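/*
 * Build the IDMAC descriptor ring in host->sg_cpu: forward-link each
 * descriptor to the next, mark the last one as end-of-ring, then reset
 * the IDMAC and program its interrupt mask and descriptor base address.
 */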
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

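/*
 * Fill the 64-bit descriptor ring for one scatterlist: each sg entry is
 * split into DW_MCI_DESC_DATA_LENGTH (4KB) sized chunks, and the first
 * and last descriptors are tagged with the FD/LD markers.
 */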
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

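/*
 * Kick off an IDMAC transfer: prepare the descriptor ring, reset the
 * DMA block (in case PIO was used before this), select the IDMAC
 * interface in CTRL/BMOD and write the poll-demand register to start
 * fetching descriptors.
 */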
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

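/*
 * External DMA (eDMA) path: drive the transfer through a generic
 * dmaengine channel, with the slave burst size matched to the FIFO
 * watermark and dw_mci_dmac_complete_dma() as the completion callback.
 */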
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
	if (!host->dms->ch) {
		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return -ENXIO;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

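/*
 * Map the request's scatterlist for DMA.  "Complex" transfers (shorter
 * than DW_MCI_DMA_THRESHOLD bytes, or without 32-bit aligned buffers
 * and lengths) are rejected with -EINVAL so the caller falls back to
 * PIO.  Otherwise the number of mapped sg entries is returned.
 */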
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths.  Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
				|| !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					"card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					"card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
			!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

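/*
 * Choose the DMA multiple-transaction size (MSIZE) and RX/TX FIFO
 * watermarks for the current block size: the largest burst size that
 * evenly divides both the block depth and the TX watermark inverse is
 * selected, falling back to an MSIZE of 1 otherwise.
 */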
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
	    (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * Card write Threshold is introduced since 2.80a
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104 &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.  If wm_aligned
		 * is set, we set the watermark to be the same as the data
		 * size.  If the next issued data may be transferred by DMA
		 * mode, prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

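/*
 * Program CLKDIV/CLKENA for the slot.  CLKDIV holds half the divider,
 * and the divider is rounded up so the card is never over-clocked.
 * As a worked example (numbers assumed, not from the databook): with
 * bus_hz = 50 MHz and a requested clock of 400 kHz, div = 50000000 /
 * 400000 = 125, then DIV_ROUND_UP(125, 2) = 63, for an actual card
 * clock of 50 MHz / (2 * 63) ~= 397 kHz.
 */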
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock != slot->__clk_old &&
			!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
			force_clkinit) {
			/* Silence the verbose log if calling from PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
					slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if failed turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
			       SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle.  According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	__dw_mci_enable_sdio_irq(slot, enb);

	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
	if (enb)
		pm_runtime_get_noresume(host->dev);
	else
		pm_runtime_put_noidle(host->dev);
}

static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);

	__dw_mci_enable_sdio_irq(slot, 1);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

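/*
 * Full controller reset: CTRL + FIFO (+ DMA when in use).  Waits for
 * any outstanding dma_req to drain, resets the IDMAC if that is the
 * transfer mode, and finishes with a clock-update command so the CIU
 * reloads its clock registers.
 */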
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS
		 * register to clear any interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		if (!host->use_dma) {
			ret = true;
			goto ciu_out;
		}

		/* Wait for dma_req to be cleared */
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_DMA_REQ),
					      1, 500 * USEC_PER_MSEC)) {
			dev_err(host->dev,
				"%s: Timeout waiting for dma_req to be cleared\n",
				__func__);
			goto ciu_out;
		}

		/* when using DMA next we reset the fifo again */
		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
			goto ciu_out;
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also recommended that we reset and reprogram idmac */
		dw_mci_idmac_reset(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

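/*
 * Note (illustrative): for a 136-bit (R2) response the controller is
 * assumed to store the least-significant word in RESP0 and the
 * most-significant word in RESP3, while the MMC core expects resp[0]
 * to hold the most-significant word; hence the reversed indexing above.
 */
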
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status == DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status == DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_ms;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);

	/* Add a bit of spare time */
	drto_ms += 10;

	mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
}

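/*
 * Worked example (illustrative): with the maximum TMOUT[31:8] =
 * 0xFFFFFF (drto_clks = 16777215) and bus_hz = 50 MHz, drto_ms =
 * DIV_ROUND_UP(16777215, 50000) = 336; adding the 10 ms of slack arms
 * the software timer at roughly 346 ms as a backstop for the hardware
 * data-read timeout (DRTO) interrupt.
 */
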
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * During the UHS tuning sequence, sending the
				 * stop command after a response CRC error
				 * would throw the system into a confused
				 * state, causing all future tuning phases to
				 * report failure.
				 *
				 * In such a case the controller will move
				 * into a data transfer state after a response
				 * error or response CRC error. Let that
				 * finish before trying to send a stop, so
				 * we'll go to STATE_SENDING_DATA.
				 *
				 * Although letting the data transfer take
				 * place will waste a bit of time (we already
				 * know the command was bad), it can't cause
				 * any errors since it's possible it would
				 * have taken place anyway if this tasklet got
				 * delayed. Allowing the transfer to take
				 * place avoids races and keeps things simple.
				 */
				if ((err != -ETIMEDOUT) &&
				    (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
					state = STATE_SENDING_DATA;
					continue;
				}

				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If the data-related interrupts don't arrive
				 * within the given time while in the reading
				 * data state, arm the software data-read
				 * timeout.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed. This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events)) {
				/*
				 * The data error interrupt came, but the
				 * data-over interrupt didn't arrive within
				 * the given time while in the reading data
				 * state; arm the software timeout.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for an open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err is non-zero, the stop/abort command has
			 * already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in a data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (!mrq->sbc && mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}

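/*
 * Summary (informal, derived from the switch above): a request normally
 * walks STATE_SENDING_CMD -> STATE_SENDING_DATA -> STATE_DATA_BUSY ->
 * STATE_SENDING_STOP -> idle via dw_mci_request_end(); data errors
 * detour through STATE_DATA_ERROR before completing, and CMD11 voltage
 * switching parks in STATE_WAITING_CMD11_DONE until the card responds.
 */
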
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

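/*
 * Worked example (illustrative): with a 32-bit FIFO (data_shift = 2),
 * pushing a 5-byte scatterlist segment writes one full 4-byte word to
 * the FIFO and parks the trailing byte in part_buf with
 * part_buf_count = 1; the next segment, or the end-of-transfer check in
 * the push helpers below, completes or flushes that partial word.
 */
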
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready, read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
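
/*
 * Note (illustrative): fcnt converts the FIFO fill level into bytes.
 * With a 32-bit FIFO (shift = 2), SDMMC_GET_FCNT() reporting 4 words
 * plus one leftover byte in part_buf gives fcnt = (4 << 2) + 1 = 17
 * bytes available to pull into the current scatterlist segment.
 */
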
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR, write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static void dw_mci_handle_cd(struct dw_mci *host)
{
	struct dw_mci_slot *slot = host->slot;

	if (slot->mmc->ops->card_event)
		slot->mmc->ops->card_event(slot->mmc);
	mmc_detect_change(slot->mmc,
			  msecs_to_jiffies(host->pdata->detect_delay_ms));
}

static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	struct dw_mci_slot *slot = host->slot;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			unsigned long irqflags;

			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error, report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
			mci_writel(host, RINTSTS,
				   SDMMC_INT_SDIO(slot->sdio_id));
			__dw_mci_enable_sdio_irq(slot, 0);
			sdio_signal_irq(slot->mmc);
		}
	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMAC interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
					SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
					SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}
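
/*
 * Note (illustrative): MINTSTS is the masked interrupt status, so the
 * handler only ever sees sources enabled in INTMASK, while RINTSTS is
 * the raw status; writing 1s to RINTSTS acknowledges the corresponding
 * bits, which is why every branch above clears exactly what it handled.
 */
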
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot = slot;

	mmc->ops = &dw_mci_ops;
	if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
					   freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		dev_info(host->dev,
			 "'clock-freq-min-max' property is deprecated.\n");
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/* if there are external regulators, get them */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	/*
	 * Support MMC_CAP_ERASE by default;
	 * it is needed for the trim/discard/erase commands.
	 */
	mmc->caps |= MMC_CAP_ERASE;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	/* Process SDIO IRQs through the sdio_irq_work. */
	if (mmc->caps & MMC_CAP_SDIO_IRQ)
		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
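
/*
 * Worked example (illustrative; ring_size depends on the descriptor
 * layout chosen elsewhere in this file): with a PAGE_SIZE descriptor
 * ring holding 256 32-bit IDMAC descriptors, the IDMAC branch above
 * yields max_segs = 256, max_seg_size = 4 KiB, max_req_size = 1 MiB and
 * max_blk_count = 2048 blocks of 512 bytes per request.
 */
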
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot = NULL;
	mmc_free_host(slot->mmc);
}

static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;

	/*
	 * Check the transfer mode from HCON[17:16].
	 * The dw_mmc databook's description is ambiguous; in practice:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to the DesignWare DMA Interface, the Generic DMA
	 * Interface has a simpler request/acknowledge handshake mechanism,
	 * and both are treated as external DMA masters by dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check the ADDR_CONFIG bit in HCON to find
		 * the IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((device_property_read_string_array(dev, "dma-names",
						       NULL, 0) < 0) ||
		    !device_property_present(dev, "dmas")) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}

static void dw_mci_cmd11_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static void dw_mci_dto_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If the DTO interrupt does NOT come while in a sending
		 * data state, notify the driver to terminate the current
		 * transfer and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		break;
	}
}

#ifdef CONFIG_OF
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find the optional reset controller if one exists */
	pdata->rstc = devm_reset_control_get_optional(dev, "reset");
	if (IS_ERR(pdata->rstc)) {
		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
	}

	/* find out the number of slots supported */
	if (device_property_read_u32(dev, "num-slots", &pdata->num_slots))
		dev_info(dev, "'num-slots' property is deprecated.\n");

	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	device_property_read_u32(dev, "card-detect-delay",
				 &pdata->detect_delay_ms);

	device_property_read_u32(dev, "data-addr", &host->data_addr_override);

	if (device_property_present(dev, "fifo-watermark-aligned"))
		host->wm_aligned = true;

	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

static void dw_mci_enable_cd(struct dw_mci *host)
{
	unsigned long irqflags;
	u32 temp;
	struct dw_mci_slot *slot;

	/*
	 * No need for the CD interrupt if the slot polls for the card
	 * or has a usable CD GPIO.
	 */
	slot = host->slot;
	if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
		return;

	if (mmc_gpio_get_cd(slot->mmc) < 0) {
		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_CD;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}
}

int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
			return -EPROBE_DEFER;
		} else if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (!IS_ERR(host->pdata->rstc)) {
		reset_control_assert(host->pdata->rstc);
		usleep_range(10, 50);
		reset_control_deassert(host->pdata->rstc);
	}

	setup_timer(&host->cmd11_timer,
		    dw_mci_cmd11_timer, (unsigned long)host);

	setup_timer(&host->dto_timer,
		    dw_mci_dto_timer, (unsigned long)host);

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
	 * TX_WMark = fifo_size / 2, DMA multiple transaction size = 8.
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware,
		 * you should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);
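
	/*
	 * Worked example (illustrative): with fifo_size = 32,
	 * SDMMC_SET_FIFOTH(0x2, 15, 16) programs an RX watermark of 15
	 * entries, a TX watermark of 16 entries and an MSIZE code of 0x2,
	 * which is assumed to select DMA bursts of 8 transfers, matching
	 * the "DMA Size = 8" noted above.
	 */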

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * The data register offset changed in the 2.40a spec, so check
	 * the version ID and set the offset of the DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->data_addr_override)
		host->fifo_reg = host->regs + host->data_addr_override;
	else if (host->verid < DW_MMC_240A)
		host->fifo_reg = host->regs + DATA_OFFSET;
	else
		host->fifo_reg = host->regs + DATA_240A_OFFSET;

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	/*
	 * Enable interrupts for command done, data over, data empty,
	 * receive ready, and errors such as transmit/receive timeout
	 * and CRC error.
	 */
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	ret = dw_mci_init_slot(host, 0);
	if (ret) {
		dev_dbg(host->dev, "slot 0 init failed\n");
		goto err_dmaunmap;
	}

	/* Now that the slot is set up, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

err_clk_ciu:
	clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);

void dw_mci_remove(struct dw_mci *host)
{
	dev_dbg(host->dev, "remove slot 0\n");
	if (host->slot)
		dw_mci_cleanup_slot(host->slot, 0);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);

#ifdef CONFIG_PM
int dw_mci_runtime_suspend(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	clk_disable_unprepare(host->ciu_clk);

	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);

int dw_mci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);
	struct dw_mci_slot *slot = host->slot;

	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register,
	 * and invalidate prev_blksz with zero.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
		dw_mci_set_ios(slot->mmc, &slot->mmc->ios);

	/* Force setup bus to guarantee available clock output */
	dw_mci_setup_bus(slot, true);

	/* Now that the slot is set up, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
#endif /* CONFIG_PM */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");