]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/mmc/host/dw_mmc.c
mmc: dw_mmc: fix minor coding style.
[mirror_ubuntu-bionic-kernel.git] / drivers / mmc / host / dw_mmc.c
CommitLineData
f95f3850
WN
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
f95f3850
WN
25#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/mmc.h>
90c2143a 32#include <linux/mmc/sdio.h>
f95f3850
WN
33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
c07946a3 35#include <linux/regulator/consumer.h>
1791b13e 36#include <linux/workqueue.h>
c91eab4b 37#include <linux/of.h>
55a6ceb2 38#include <linux/of_gpio.h>
bf626e55 39#include <linux/mmc/slot-gpio.h>
f95f3850
WN
40
41#include "dw_mmc.h"
42
43/* Common flag combinations */
3f7eec62 44#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
f95f3850
WN
45 SDMMC_INT_HTO | SDMMC_INT_SBE | \
46 SDMMC_INT_EBE)
47#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
48 SDMMC_INT_RESP_ERR)
49#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
50 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
51#define DW_MCI_SEND_STATUS 1
52#define DW_MCI_RECV_STATUS 2
53#define DW_MCI_DMA_THRESHOLD 16
54
1f44a2a5
SJ
55#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
56#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
57
#ifdef CONFIG_MMC_DW_IDMAC
/* All IDMAC interrupt status bits, for clearing IDSTS in a single write. */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

/* In-memory descriptor consumed by the internal DMA controller (IDMAC). */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
83
0976f16d
SJ
84static const u8 tuning_blk_pattern_4bit[] = {
85 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
86 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
87 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
88 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
89 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
90 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
91 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
92 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
93};
f95f3850 94
0976f16d
SJ
95static const u8 tuning_blk_pattern_8bit[] = {
96 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
97 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
98 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
99 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
100 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
101 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
102 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
103 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
104 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
105 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
106 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
107 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
108 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
109 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
110 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
111 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
f95f3850
WN
112};
113
31bff450
SJ
114static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
116
f95f3850
WN
117#if defined(CONFIG_DEBUG_FS)
118static int dw_mci_req_show(struct seq_file *s, void *v)
119{
120 struct dw_mci_slot *slot = s->private;
121 struct mmc_request *mrq;
122 struct mmc_command *cmd;
123 struct mmc_command *stop;
124 struct mmc_data *data;
125
126 /* Make sure we get a consistent snapshot */
127 spin_lock_bh(&slot->host->lock);
128 mrq = slot->mrq;
129
130 if (mrq) {
131 cmd = mrq->cmd;
132 data = mrq->data;
133 stop = mrq->stop;
134
135 if (cmd)
136 seq_printf(s,
137 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 cmd->opcode, cmd->arg, cmd->flags,
139 cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 cmd->resp[2], cmd->error);
141 if (data)
142 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 data->bytes_xfered, data->blocks,
144 data->blksz, data->flags, data->error);
145 if (stop)
146 seq_printf(s,
147 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 stop->opcode, stop->arg, stop->flags,
149 stop->resp[0], stop->resp[1], stop->resp[2],
150 stop->resp[2], stop->error);
151 }
152
153 spin_unlock_bh(&slot->host->lock);
154
155 return 0;
156}
157
158static int dw_mci_req_open(struct inode *inode, struct file *file)
159{
160 return single_open(file, dw_mci_req_show, inode->i_private);
161}
162
163static const struct file_operations dw_mci_req_fops = {
164 .owner = THIS_MODULE,
165 .open = dw_mci_req_open,
166 .read = seq_read,
167 .llseek = seq_lseek,
168 .release = single_release,
169};
170
171static int dw_mci_regs_show(struct seq_file *s, void *v)
172{
173 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
174 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
175 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
176 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
177 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
178 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
179
180 return 0;
181}
182
183static int dw_mci_regs_open(struct inode *inode, struct file *file)
184{
185 return single_open(file, dw_mci_regs_show, inode->i_private);
186}
187
188static const struct file_operations dw_mci_regs_fops = {
189 .owner = THIS_MODULE,
190 .open = dw_mci_regs_open,
191 .read = seq_read,
192 .llseek = seq_lseek,
193 .release = single_release,
194};
195
196static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
197{
198 struct mmc_host *mmc = slot->mmc;
199 struct dw_mci *host = slot->host;
200 struct dentry *root;
201 struct dentry *node;
202
203 root = mmc->debugfs_root;
204 if (!root)
205 return;
206
207 node = debugfs_create_file("regs", S_IRUSR, root, host,
208 &dw_mci_regs_fops);
209 if (!node)
210 goto err;
211
212 node = debugfs_create_file("req", S_IRUSR, root, slot,
213 &dw_mci_req_fops);
214 if (!node)
215 goto err;
216
217 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
218 if (!node)
219 goto err;
220
221 node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 (u32 *)&host->pending_events);
223 if (!node)
224 goto err;
225
226 node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 (u32 *)&host->completed_events);
228 if (!node)
229 goto err;
230
231 return;
232
233err:
234 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
235}
236#endif /* defined(CONFIG_DEBUG_FS) */
237
238static void dw_mci_set_timeout(struct dw_mci *host)
239{
240 /* timeout (maximum) */
241 mci_writel(host, TMOUT, 0xffffffff);
242}
243
244static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
245{
246 struct mmc_data *data;
800d78bf 247 struct dw_mci_slot *slot = mmc_priv(mmc);
e95baf13 248 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
f95f3850
WN
249 u32 cmdr;
250 cmd->error = -EINPROGRESS;
251
252 cmdr = cmd->opcode;
253
90c2143a
SJ
254 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
255 cmd->opcode == MMC_GO_IDLE_STATE ||
256 cmd->opcode == MMC_GO_INACTIVE_STATE ||
257 (cmd->opcode == SD_IO_RW_DIRECT &&
258 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
f95f3850 259 cmdr |= SDMMC_CMD_STOP;
4a1b27ad
JC
260 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
261 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
f95f3850
WN
262
263 if (cmd->flags & MMC_RSP_PRESENT) {
264 /* We expect a response, so set this bit */
265 cmdr |= SDMMC_CMD_RESP_EXP;
266 if (cmd->flags & MMC_RSP_136)
267 cmdr |= SDMMC_CMD_RESP_LONG;
268 }
269
270 if (cmd->flags & MMC_RSP_CRC)
271 cmdr |= SDMMC_CMD_RESP_CRC;
272
273 data = cmd->data;
274 if (data) {
275 cmdr |= SDMMC_CMD_DAT_EXP;
276 if (data->flags & MMC_DATA_STREAM)
277 cmdr |= SDMMC_CMD_STRM_MODE;
278 if (data->flags & MMC_DATA_WRITE)
279 cmdr |= SDMMC_CMD_DAT_WR;
280 }
281
cb27a843
JH
282 if (drv_data && drv_data->prepare_command)
283 drv_data->prepare_command(slot->host, &cmdr);
800d78bf 284
f95f3850
WN
285 return cmdr;
286}
287
90c2143a
SJ
288static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
289{
290 struct mmc_command *stop;
291 u32 cmdr;
292
293 if (!cmd->data)
294 return 0;
295
296 stop = &host->stop_abort;
297 cmdr = cmd->opcode;
298 memset(stop, 0, sizeof(struct mmc_command));
299
300 if (cmdr == MMC_READ_SINGLE_BLOCK ||
301 cmdr == MMC_READ_MULTIPLE_BLOCK ||
302 cmdr == MMC_WRITE_BLOCK ||
303 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
304 stop->opcode = MMC_STOP_TRANSMISSION;
305 stop->arg = 0;
306 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
307 } else if (cmdr == SD_IO_RW_EXTENDED) {
308 stop->opcode = SD_IO_RW_DIRECT;
309 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
310 ((cmd->arg >> 28) & 0x7);
311 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
312 } else {
313 return 0;
314 }
315
316 cmdr = stop->opcode | SDMMC_CMD_STOP |
317 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
318
319 return cmdr;
320}
321
f95f3850
WN
322static void dw_mci_start_command(struct dw_mci *host,
323 struct mmc_command *cmd, u32 cmd_flags)
324{
325 host->cmd = cmd;
4a90920c 326 dev_vdbg(host->dev,
f95f3850
WN
327 "start command: ARGR=0x%08x CMDR=0x%08x\n",
328 cmd->arg, cmd_flags);
329
330 mci_writel(host, CMDARG, cmd->arg);
331 wmb();
332
333 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
334}
335
90c2143a 336static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
f95f3850 337{
90c2143a
SJ
338 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
339 dw_mci_start_command(host, stop, host->stop_cmdr);
f95f3850
WN
340}
341
342/* DMA interface functions */
343static void dw_mci_stop_dma(struct dw_mci *host)
344{
03e8cb53 345 if (host->using_dma) {
f95f3850
WN
346 host->dma_ops->stop(host);
347 host->dma_ops->cleanup(host);
f95f3850 348 }
aa50f259
SJ
349
350 /* Data transfer was stopped by the interrupt handler */
351 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
f95f3850
WN
352}
353
9aa51408
SJ
354static int dw_mci_get_dma_dir(struct mmc_data *data)
355{
356 if (data->flags & MMC_DATA_WRITE)
357 return DMA_TO_DEVICE;
358 else
359 return DMA_FROM_DEVICE;
360}
361
9beee912 362#ifdef CONFIG_MMC_DW_IDMAC
f95f3850
WN
363static void dw_mci_dma_cleanup(struct dw_mci *host)
364{
365 struct mmc_data *data = host->data;
366
367 if (data)
9aa51408 368 if (!data->host_cookie)
4a90920c 369 dma_unmap_sg(host->dev,
9aa51408
SJ
370 data->sg,
371 data->sg_len,
372 dw_mci_get_dma_dir(data));
f95f3850
WN
373}
374
5ce9d961
SJ
375static void dw_mci_idmac_reset(struct dw_mci *host)
376{
377 u32 bmod = mci_readl(host, BMOD);
378 /* Software reset of DMA */
379 bmod |= SDMMC_IDMAC_SWRESET;
380 mci_writel(host, BMOD, bmod);
381}
382
f95f3850
WN
383static void dw_mci_idmac_stop_dma(struct dw_mci *host)
384{
385 u32 temp;
386
387 /* Disable and reset the IDMAC interface */
388 temp = mci_readl(host, CTRL);
389 temp &= ~SDMMC_CTRL_USE_IDMAC;
390 temp |= SDMMC_CTRL_DMA_RESET;
391 mci_writel(host, CTRL, temp);
392
393 /* Stop the IDMAC running */
394 temp = mci_readl(host, BMOD);
a5289a43 395 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
5ce9d961 396 temp |= SDMMC_IDMAC_SWRESET;
f95f3850
WN
397 mci_writel(host, BMOD, temp);
398}
399
400static void dw_mci_idmac_complete_dma(struct dw_mci *host)
401{
402 struct mmc_data *data = host->data;
403
4a90920c 404 dev_vdbg(host->dev, "DMA complete\n");
f95f3850
WN
405
406 host->dma_ops->cleanup(host);
407
408 /*
409 * If the card was removed, data will be NULL. No point in trying to
410 * send the stop command or waiting for NBUSY in this case.
411 */
412 if (data) {
413 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
414 tasklet_schedule(&host->tasklet);
415 }
416}
417
418static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
419 unsigned int sg_len)
420{
421 int i;
422 struct idmac_desc *desc = host->sg_cpu;
423
424 for (i = 0; i < sg_len; i++, desc++) {
425 unsigned int length = sg_dma_len(&data->sg[i]);
426 u32 mem_addr = sg_dma_address(&data->sg[i]);
427
428 /* Set the OWN bit and disable interrupts for this descriptor */
429 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
430
431 /* Buffer length */
432 IDMAC_SET_BUFFER1_SIZE(desc, length);
433
434 /* Physical address to DMA to/from */
435 desc->des2 = mem_addr;
436 }
437
438 /* Set first descriptor */
439 desc = host->sg_cpu;
440 desc->des0 |= IDMAC_DES0_FD;
441
442 /* Set last descriptor */
443 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
444 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
445 desc->des0 |= IDMAC_DES0_LD;
446
447 wmb();
448}
449
450static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
451{
452 u32 temp;
453
454 dw_mci_translate_sglist(host, host->data, sg_len);
455
456 /* Select IDMAC interface */
457 temp = mci_readl(host, CTRL);
458 temp |= SDMMC_CTRL_USE_IDMAC;
459 mci_writel(host, CTRL, temp);
460
461 wmb();
462
463 /* Enable the IDMAC */
464 temp = mci_readl(host, BMOD);
a5289a43 465 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
f95f3850
WN
466 mci_writel(host, BMOD, temp);
467
468 /* Start it running */
469 mci_writel(host, PLDMND, 1);
470}
471
472static int dw_mci_idmac_init(struct dw_mci *host)
473{
474 struct idmac_desc *p;
897b69e7 475 int i;
f95f3850
WN
476
477 /* Number of descriptors in the ring buffer */
478 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
479
480 /* Forward link the descriptor list */
481 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
482 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
483
484 /* Set the last descriptor as the end-of-ring descriptor */
485 p->des3 = host->sg_dma;
486 p->des0 = IDMAC_DES0_ER;
487
5ce9d961 488 dw_mci_idmac_reset(host);
141a712a 489
f95f3850 490 /* Mask out interrupts - get Tx & Rx complete only */
fc79a4d6 491 mci_writel(host, IDSTS, IDMAC_INT_CLR);
f95f3850
WN
492 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
493 SDMMC_IDMAC_INT_TI);
494
495 /* Set the descriptor base address */
496 mci_writel(host, DBADDR, host->sg_dma);
497 return 0;
498}
499
8e2b36ea 500static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
885c3e80
SJ
501 .init = dw_mci_idmac_init,
502 .start = dw_mci_idmac_start_dma,
503 .stop = dw_mci_idmac_stop_dma,
504 .complete = dw_mci_idmac_complete_dma,
505 .cleanup = dw_mci_dma_cleanup,
506};
507#endif /* CONFIG_MMC_DW_IDMAC */
508
9aa51408
SJ
509static int dw_mci_pre_dma_transfer(struct dw_mci *host,
510 struct mmc_data *data,
511 bool next)
f95f3850
WN
512{
513 struct scatterlist *sg;
9aa51408 514 unsigned int i, sg_len;
03e8cb53 515
9aa51408
SJ
516 if (!next && data->host_cookie)
517 return data->host_cookie;
f95f3850
WN
518
519 /*
520 * We don't do DMA on "complex" transfers, i.e. with
521 * non-word-aligned buffers or lengths. Also, we don't bother
522 * with all the DMA setup overhead for short transfers.
523 */
524 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
525 return -EINVAL;
9aa51408 526
f95f3850
WN
527 if (data->blksz & 3)
528 return -EINVAL;
529
530 for_each_sg(data->sg, sg, data->sg_len, i) {
531 if (sg->offset & 3 || sg->length & 3)
532 return -EINVAL;
533 }
534
4a90920c 535 sg_len = dma_map_sg(host->dev,
9aa51408
SJ
536 data->sg,
537 data->sg_len,
538 dw_mci_get_dma_dir(data));
539 if (sg_len == 0)
540 return -EINVAL;
03e8cb53 541
9aa51408
SJ
542 if (next)
543 data->host_cookie = sg_len;
f95f3850 544
9aa51408
SJ
545 return sg_len;
546}
547
9aa51408
SJ
548static void dw_mci_pre_req(struct mmc_host *mmc,
549 struct mmc_request *mrq,
550 bool is_first_req)
551{
552 struct dw_mci_slot *slot = mmc_priv(mmc);
553 struct mmc_data *data = mrq->data;
554
555 if (!slot->host->use_dma || !data)
556 return;
557
558 if (data->host_cookie) {
559 data->host_cookie = 0;
560 return;
561 }
562
563 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
564 data->host_cookie = 0;
565}
566
567static void dw_mci_post_req(struct mmc_host *mmc,
568 struct mmc_request *mrq,
569 int err)
570{
571 struct dw_mci_slot *slot = mmc_priv(mmc);
572 struct mmc_data *data = mrq->data;
573
574 if (!slot->host->use_dma || !data)
575 return;
576
577 if (data->host_cookie)
4a90920c 578 dma_unmap_sg(slot->host->dev,
9aa51408
SJ
579 data->sg,
580 data->sg_len,
581 dw_mci_get_dma_dir(data));
582 data->host_cookie = 0;
583}
584
52426899
SJ
/*
 * Pick burst size (MSIZE) and RX/TX watermarks for the current block size
 * and program FIFOTH accordingly. Only meaningful when the IDMAC is in use.
 *
 * Cleanup: use the kernel's ARRAY_SIZE() instead of the open-coded
 * sizeof-division, and fix the "uesed" typo in the trailing comment.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	/* Pick the largest burst size that evenly divides both depths. */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
625
f1d2736c
SJ
626static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
627{
628 unsigned int blksz = data->blksz;
629 u32 blksz_depth, fifo_depth;
630 u16 thld_size;
631
632 WARN_ON(!(data->flags & MMC_DATA_READ));
633
634 if (host->timing != MMC_TIMING_MMC_HS200 &&
635 host->timing != MMC_TIMING_UHS_SDR104)
636 goto disable;
637
638 blksz_depth = blksz / (1 << host->data_shift);
639 fifo_depth = host->fifo_depth;
640
641 if (blksz_depth > fifo_depth)
642 goto disable;
643
644 /*
645 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
646 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
647 * Currently just choose blksz.
648 */
649 thld_size = blksz;
650 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
651 return;
652
653disable:
654 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
655}
656
9aa51408
SJ
657static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
658{
659 int sg_len;
660 u32 temp;
661
662 host->using_dma = 0;
663
664 /* If we don't have a channel, we can't do DMA */
665 if (!host->use_dma)
666 return -ENODEV;
667
668 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
a99aa9b9
SJ
669 if (sg_len < 0) {
670 host->dma_ops->stop(host);
9aa51408 671 return sg_len;
a99aa9b9 672 }
9aa51408
SJ
673
674 host->using_dma = 1;
f95f3850 675
4a90920c 676 dev_vdbg(host->dev,
f95f3850
WN
677 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
678 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
679 sg_len);
680
52426899
SJ
681 /*
682 * Decide the MSIZE and RX/TX Watermark.
683 * If current block size is same with previous size,
684 * no need to update fifoth.
685 */
686 if (host->prev_blksz != data->blksz)
687 dw_mci_adjust_fifoth(host, data);
688
f95f3850
WN
689 /* Enable the DMA interface */
690 temp = mci_readl(host, CTRL);
691 temp |= SDMMC_CTRL_DMA_ENABLE;
692 mci_writel(host, CTRL, temp);
693
694 /* Disable RX/TX IRQs, let DMA handle it */
695 temp = mci_readl(host, INTMASK);
696 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
697 mci_writel(host, INTMASK, temp);
698
699 host->dma_ops->start(host, sg_len);
700
701 return 0;
702}
703
704static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
705{
706 u32 temp;
707
708 data->error = -EINPROGRESS;
709
710 WARN_ON(host->data);
711 host->sg = NULL;
712 host->data = data;
713
f1d2736c 714 if (data->flags & MMC_DATA_READ) {
55c5efbc 715 host->dir_status = DW_MCI_RECV_STATUS;
f1d2736c
SJ
716 dw_mci_ctrl_rd_thld(host, data);
717 } else {
55c5efbc 718 host->dir_status = DW_MCI_SEND_STATUS;
f1d2736c 719 }
55c5efbc 720
f95f3850 721 if (dw_mci_submit_data_dma(host, data)) {
f9c2a0dc
SJ
722 int flags = SG_MITER_ATOMIC;
723 if (host->data->flags & MMC_DATA_READ)
724 flags |= SG_MITER_TO_SG;
725 else
726 flags |= SG_MITER_FROM_SG;
727
728 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
f95f3850 729 host->sg = data->sg;
34b664a2
JH
730 host->part_buf_start = 0;
731 host->part_buf_count = 0;
f95f3850 732
b40af3aa 733 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
f95f3850
WN
734 temp = mci_readl(host, INTMASK);
735 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
736 mci_writel(host, INTMASK, temp);
737
738 temp = mci_readl(host, CTRL);
739 temp &= ~SDMMC_CTRL_DMA_ENABLE;
740 mci_writel(host, CTRL, temp);
52426899
SJ
741
742 /*
743 * Use the initial fifoth_val for PIO mode.
744 * If next issued data may be transfered by DMA mode,
745 * prev_blksz should be invalidated.
746 */
747 mci_writel(host, FIFOTH, host->fifoth_val);
748 host->prev_blksz = 0;
749 } else {
750 /*
751 * Keep the current block size.
752 * It will be used to decide whether to update
753 * fifoth register next time.
754 */
755 host->prev_blksz = data->blksz;
f95f3850
WN
756 }
757}
758
759static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
760{
761 struct dw_mci *host = slot->host;
762 unsigned long timeout = jiffies + msecs_to_jiffies(500);
763 unsigned int cmd_status = 0;
764
765 mci_writel(host, CMDARG, arg);
766 wmb();
767 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
768
769 while (time_before(jiffies, timeout)) {
770 cmd_status = mci_readl(host, CMD);
771 if (!(cmd_status & SDMMC_CMD_START))
772 return;
773 }
774 dev_err(&slot->mmc->class_dev,
775 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
776 cmd, arg, cmd_status);
777}
778
ab269128 779static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
f95f3850
WN
780{
781 struct dw_mci *host = slot->host;
fdf492a1 782 unsigned int clock = slot->clock;
f95f3850 783 u32 div;
9623b5b9 784 u32 clk_en_a;
f95f3850 785
fdf492a1
DA
786 if (!clock) {
787 mci_writel(host, CLKENA, 0);
788 mci_send_cmd(slot,
789 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
790 } else if (clock != host->current_speed || force_clkinit) {
791 div = host->bus_hz / clock;
792 if (host->bus_hz % clock && host->bus_hz > clock)
f95f3850
WN
793 /*
794 * move the + 1 after the divide to prevent
795 * over-clocking the card.
796 */
e419990b
SJ
797 div += 1;
798
fdf492a1 799 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
f95f3850 800
fdf492a1
DA
801 if ((clock << div) != slot->__clk_old || force_clkinit)
802 dev_info(&slot->mmc->class_dev,
803 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
804 slot->id, host->bus_hz, clock,
805 div ? ((host->bus_hz / div) >> 1) :
806 host->bus_hz, div);
f95f3850
WN
807
808 /* disable clock */
809 mci_writel(host, CLKENA, 0);
810 mci_writel(host, CLKSRC, 0);
811
812 /* inform CIU */
813 mci_send_cmd(slot,
814 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
815
816 /* set clock to desired speed */
817 mci_writel(host, CLKDIV, div);
818
819 /* inform CIU */
820 mci_send_cmd(slot,
821 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
822
9623b5b9
DA
823 /* enable clock; only low power if no SDIO */
824 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
825 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
826 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
827 mci_writel(host, CLKENA, clk_en_a);
f95f3850
WN
828
829 /* inform CIU */
830 mci_send_cmd(slot,
831 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
832
fdf492a1
DA
833 /* keep the clock with reflecting clock dividor */
834 slot->__clk_old = clock << div;
f95f3850
WN
835 }
836
fdf492a1
DA
837 host->current_speed = clock;
838
f95f3850 839 /* Set the current slot bus width */
1d56c453 840 mci_writel(host, CTYPE, (slot->ctype << slot->id));
f95f3850
WN
841}
842
053b3ce6
SJ
843static void __dw_mci_start_request(struct dw_mci *host,
844 struct dw_mci_slot *slot,
845 struct mmc_command *cmd)
f95f3850
WN
846{
847 struct mmc_request *mrq;
f95f3850
WN
848 struct mmc_data *data;
849 u32 cmdflags;
850
851 mrq = slot->mrq;
f95f3850 852
f95f3850
WN
853 host->cur_slot = slot;
854 host->mrq = mrq;
855
856 host->pending_events = 0;
857 host->completed_events = 0;
e352c813 858 host->cmd_status = 0;
f95f3850 859 host->data_status = 0;
e352c813 860 host->dir_status = 0;
f95f3850 861
053b3ce6 862 data = cmd->data;
f95f3850
WN
863 if (data) {
864 dw_mci_set_timeout(host);
865 mci_writel(host, BYTCNT, data->blksz*data->blocks);
866 mci_writel(host, BLKSIZ, data->blksz);
867 }
868
f95f3850
WN
869 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
870
871 /* this is the first command, send the initialization clock */
872 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
873 cmdflags |= SDMMC_CMD_INIT;
874
875 if (data) {
876 dw_mci_submit_data(host, data);
877 wmb();
878 }
879
880 dw_mci_start_command(host, cmd, cmdflags);
881
882 if (mrq->stop)
883 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
90c2143a
SJ
884 else
885 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
f95f3850
WN
886}
887
053b3ce6
SJ
888static void dw_mci_start_request(struct dw_mci *host,
889 struct dw_mci_slot *slot)
890{
891 struct mmc_request *mrq = slot->mrq;
892 struct mmc_command *cmd;
893
894 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
895 __dw_mci_start_request(host, slot, cmd);
896}
897
7456caae 898/* must be called with host->lock held */
f95f3850
WN
899static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
900 struct mmc_request *mrq)
901{
902 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
903 host->state);
904
f95f3850
WN
905 slot->mrq = mrq;
906
907 if (host->state == STATE_IDLE) {
908 host->state = STATE_SENDING_CMD;
909 dw_mci_start_request(host, slot);
910 } else {
911 list_add_tail(&slot->queue_node, &host->queue);
912 }
f95f3850
WN
913}
914
915static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
916{
917 struct dw_mci_slot *slot = mmc_priv(mmc);
918 struct dw_mci *host = slot->host;
919
920 WARN_ON(slot->mrq);
921
7456caae
JH
922 /*
923 * The check for card presence and queueing of the request must be
924 * atomic, otherwise the card could be removed in between and the
925 * request wouldn't fail until another card was inserted.
926 */
927 spin_lock_bh(&host->lock);
928
f95f3850 929 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
7456caae 930 spin_unlock_bh(&host->lock);
f95f3850
WN
931 mrq->cmd->error = -ENOMEDIUM;
932 mmc_request_done(mmc, mrq);
933 return;
934 }
935
f95f3850 936 dw_mci_queue_request(host, slot, mrq);
7456caae
JH
937
938 spin_unlock_bh(&host->lock);
f95f3850
WN
939}
940
941static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
942{
943 struct dw_mci_slot *slot = mmc_priv(mmc);
e95baf13 944 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
41babf75 945 u32 regs;
f95f3850 946
f95f3850 947 switch (ios->bus_width) {
f95f3850
WN
948 case MMC_BUS_WIDTH_4:
949 slot->ctype = SDMMC_CTYPE_4BIT;
950 break;
c9b2a06f
JC
951 case MMC_BUS_WIDTH_8:
952 slot->ctype = SDMMC_CTYPE_8BIT;
953 break;
b2f7cb45
JC
954 default:
955 /* set default 1 bit mode */
956 slot->ctype = SDMMC_CTYPE_1BIT;
f95f3850
WN
957 }
958
3f514291
SJ
959 regs = mci_readl(slot->host, UHS_REG);
960
41babf75 961 /* DDR mode set */
cab3a802 962 if (ios->timing == MMC_TIMING_MMC_DDR52)
c69042a5 963 regs |= ((0x1 << slot->id) << 16);
3f514291 964 else
c69042a5 965 regs &= ~((0x1 << slot->id) << 16);
3f514291
SJ
966
967 mci_writel(slot->host, UHS_REG, regs);
f1d2736c 968 slot->host->timing = ios->timing;
41babf75 969
fdf492a1
DA
970 /*
971 * Use mirror of ios->clock to prevent race with mmc
972 * core ios update when finding the minimum.
973 */
974 slot->clock = ios->clock;
f95f3850 975
cb27a843
JH
976 if (drv_data && drv_data->set_ios)
977 drv_data->set_ios(slot->host, ios);
800d78bf 978
bf7cb224
JC
979 /* Slot specific timing and width adjustment */
980 dw_mci_setup_bus(slot, false);
981
f95f3850
WN
982 switch (ios->power_mode) {
983 case MMC_POWER_UP:
984 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
4366dcc5
JC
985 regs = mci_readl(slot->host, PWREN);
986 regs |= (1 << slot->id);
987 mci_writel(slot->host, PWREN, regs);
e6f34e2f
JH
988 break;
989 case MMC_POWER_OFF:
4366dcc5
JC
990 regs = mci_readl(slot->host, PWREN);
991 regs &= ~(1 << slot->id);
992 mci_writel(slot->host, PWREN, regs);
f95f3850
WN
993 break;
994 default:
995 break;
996 }
997}
998
999static int dw_mci_get_ro(struct mmc_host *mmc)
1000{
1001 int read_only;
1002 struct dw_mci_slot *slot = mmc_priv(mmc);
f95f3850
WN
1003
1004 /* Use platform get_ro function, else try on board write protect */
9640639b 1005 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
b4967aa5 1006 read_only = 0;
55a6ceb2
DA
1007 else if (gpio_is_valid(slot->wp_gpio))
1008 read_only = gpio_get_value(slot->wp_gpio);
f95f3850
WN
1009 else
1010 read_only =
1011 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1012
1013 dev_dbg(&mmc->class_dev, "card is %s\n",
1014 read_only ? "read-only" : "read-write");
1015
1016 return read_only;
1017}
1018
1019static int dw_mci_get_cd(struct mmc_host *mmc)
1020{
1021 int present;
1022 struct dw_mci_slot *slot = mmc_priv(mmc);
1023 struct dw_mci_board *brd = slot->host->pdata;
7cf347bd
ZG
1024 struct dw_mci *host = slot->host;
1025 int gpio_cd = mmc_gpio_get_cd(mmc);
f95f3850
WN
1026
1027 /* Use platform get_cd function, else try onboard card detect */
fc3d7720
JC
1028 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1029 present = 1;
bf626e55 1030 else if (!IS_ERR_VALUE(gpio_cd))
7cf347bd 1031 present = gpio_cd;
f95f3850
WN
1032 else
1033 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1034 == 0 ? 1 : 0;
1035
7cf347bd 1036 spin_lock_bh(&host->lock);
bf626e55
ZG
1037 if (present) {
1038 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
f95f3850 1039 dev_dbg(&mmc->class_dev, "card is present\n");
bf626e55
ZG
1040 } else {
1041 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
f95f3850 1042 dev_dbg(&mmc->class_dev, "card is not present\n");
bf626e55 1043 }
7cf347bd 1044 spin_unlock_bh(&host->lock);
f95f3850
WN
1045
1046 return present;
1047}
1048
9623b5b9
DA
/*
 * Disable lower power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		/* latch the new CLKENA value into the card clock domain */
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}
1072
1a5c8e1f
SH
1073static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1074{
1075 struct dw_mci_slot *slot = mmc_priv(mmc);
1076 struct dw_mci *host = slot->host;
1077 u32 int_mask;
1078
1079 /* Enable/disable Slot Specific SDIO interrupt */
1080 int_mask = mci_readl(host, INTMASK);
1081 if (enb) {
9623b5b9
DA
1082 /*
1083 * Turn off low power mode if it was enabled. This is a bit of
1084 * a heavy operation and we disable / enable IRQs a lot, so
1085 * we'll leave low power mode disabled and it will get
1086 * re-enabled again in dw_mci_setup_bus().
1087 */
1088 dw_mci_disable_low_power(slot);
1089
1a5c8e1f 1090 mci_writel(host, INTMASK,
705ad047 1091 (int_mask | SDMMC_INT_SDIO(slot->id)));
1a5c8e1f
SH
1092 } else {
1093 mci_writel(host, INTMASK,
705ad047 1094 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1a5c8e1f
SH
1095 }
1096}
1097
0976f16d
SJ
1098static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1099{
1100 struct dw_mci_slot *slot = mmc_priv(mmc);
1101 struct dw_mci *host = slot->host;
1102 const struct dw_mci_drv_data *drv_data = host->drv_data;
1103 struct dw_mci_tuning_data tuning_data;
1104 int err = -ENOSYS;
1105
1106 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1107 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1108 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1109 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1110 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1111 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1112 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1113 } else {
1114 return -EINVAL;
1115 }
1116 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1117 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1118 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1119 } else {
1120 dev_err(host->dev,
1121 "Undefined command(%d) for tuning\n", opcode);
1122 return -EINVAL;
1123 }
1124
1125 if (drv_data && drv_data->execute_tuning)
1126 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1127 return err;
1128}
1129
/* mmc_host_ops hooks exported to the MMC core for each dw_mmc slot. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};
1140
/*
 * Complete the current request and, if another slot is waiting in the
 * queue, immediately start its request; otherwise go back to STATE_IDLE.
 *
 * Called with host->lock held.  The lock is dropped around
 * mmc_request_done() because the core's completion path may re-enter
 * this driver and take the lock again.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1169
/*
 * Harvest the response registers for a completed command and translate the
 * latched interrupt status into cmd->error.  Returns that error (0 = OK).
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit response: RESP0 holds the low word */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* map hardware status bits to errno, most specific first */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}
1208
/*
 * Evaluate the latched data-phase status: on success account the full
 * transfer length, on error map the status bits to an errno and reset the
 * FIFO.  Returns data->error (0 = OK).
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
			    DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
				   DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_err(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_fifo_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
1251
/*
 * Bottom half of the interrupt handler: drives the request state machine
 * (SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP) from the
 * event bits latched by dw_mci_interrupt().  Runs under host->lock and
 * loops until the state stops changing; the case fallthroughs are
 * intentional so one invocation can advance through several phases.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now issue the real data command */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/* command failed before data phase: abort it */
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_fifo_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1388
34b664a2
JH
1389/* push final bytes to part_buf, only use during push */
1390static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 1391{
34b664a2
JH
1392 memcpy((void *)&host->part_buf, buf, cnt);
1393 host->part_buf_count = cnt;
1394}
f95f3850 1395
34b664a2
JH
1396/* append bytes to part_buf, only use during push */
1397static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1398{
1399 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1400 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1401 host->part_buf_count += cnt;
1402 return cnt;
1403}
f95f3850 1404
34b664a2
JH
1405/* pull first bytes from part_buf, only use during pull */
1406static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1407{
1408 cnt = min(cnt, (int)host->part_buf_count);
1409 if (cnt) {
1410 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1411 cnt);
1412 host->part_buf_count -= cnt;
1413 host->part_buf_start += cnt;
f95f3850 1414 }
34b664a2 1415 return cnt;
f95f3850
WN
1416}
1417
34b664a2
JH
1418/* pull final bytes from the part_buf, assuming it's just been filled */
1419static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 1420{
34b664a2
JH
1421 memcpy(buf, &host->part_buf, cnt);
1422 host->part_buf_start = cnt;
1423 host->part_buf_count = (1 << host->data_shift) - cnt;
1424}
f95f3850 1425
34b664a2
JH
/*
 * PIO push of <cnt> bytes into the data FIFO using 16-bit accesses.
 * Sub-word leftovers are staged in host->part_buf16; the unaligned-buffer
 * path bounces through a stack copy when the platform cannot do efficient
 * unaligned loads.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
	}
}
f95f3850 1476
34b664a2
JH
/*
 * PIO pull of <cnt> bytes from the data FIFO using 16-bit accesses; any
 * trailing sub-word bytes are left in host->part_buf16 via
 * dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* partial word: read a full FIFO entry, keep remainder */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1508
/*
 * PIO push of <cnt> bytes into the data FIFO using 32-bit accesses.
 * Mirrors dw_mci_push_data16() with a 4-byte FIFO word and
 * host->part_buf32 as the staging area.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
	}
}
1559
/*
 * PIO pull of <cnt> bytes from the data FIFO using 32-bit accesses;
 * mirrors dw_mci_pull_data16() with host->part_buf32 holding leftovers.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* partial word: read a full FIFO entry, keep remainder */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1591
/*
 * PIO push of <cnt> bytes into the data FIFO using 64-bit accesses.
 * Mirrors dw_mci_push_data16() with an 8-byte FIFO word and
 * host->part_buf as the staging area.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
	}
}
1643
/*
 * PIO pull of <cnt> bytes from the data FIFO using 64-bit accesses;
 * mirrors dw_mci_pull_data16() with host->part_buf holding leftovers.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* partial word: read a full FIFO entry, keep remainder */
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
f95f3850 1675
34b664a2
JH
1676static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1677{
1678 int len;
f95f3850 1679
34b664a2
JH
1680 /* get remaining partial bytes */
1681 len = dw_mci_pull_part_bytes(host, buf, cnt);
1682 if (unlikely(len == cnt))
1683 return;
1684 buf += len;
1685 cnt -= len;
1686
1687 /* get the rest of the data */
1688 host->pull_data(host, buf, cnt);
f95f3850
WN
1689}
1690
/*
 * PIO receive path: walk the request's scatterlist with sg_miter and drain
 * whatever the FIFO currently holds into it.  @dto indicates this call was
 * triggered by the data-over interrupt, in which case we keep reading
 * until the FIFO count drops to zero.  When the scatterlist is exhausted,
 * flag EVENT_XFER_COMPLETE for the tasklet.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes available = FIFO words + buffered remainder */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* publish the transfer state before the event bit */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1744
/*
 * PIO transmit path: walk the request's scatterlist with sg_miter and fill
 * the FIFO's free space from it, looping while the TXDR (transmit data
 * request) interrupt stays asserted.  When the scatterlist is exhausted,
 * flag EVENT_XFER_COMPLETE for the tasklet.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free bytes = empty FIFO words minus buffered bytes */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* publish the transfer state before the event bit */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1798
/*
 * Latch the raw command-phase status (unless an earlier error already
 * recorded one) and schedule the tasklet to process CMD completion.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* order the status store before the event bit becomes visible */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1809
/*
 * Top-half interrupt handler: acknowledge each pending source in RINTSTS,
 * latch command/data status for the tasklet, service PIO FIFO events
 * inline, and forward card-detect and per-slot SDIO interrupts.  The IDMAC
 * status is handled separately at the end when internal DMA is built in.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* drain anything still left in the FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			/* card detect is debounced in process context */
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
1903
/*
 * Card-detect workqueue handler: for every slot, compare the current
 * presence state with the last recorded one and, on a change, fail any
 * in-flight or queued request with -ENOMEDIUM, reset the FIFO (and IDMAC)
 * on removal, and finally tell the MMC core to rescan the slot.  The
 * while-loop re-reads presence so rapid insert/remove bounces are handled.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* the active request: unwind by state */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* still queued: complete it directly */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				/* Clear down the FIFO */
				dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
				dw_mci_idmac_reset(host);
#endif

			}

			spin_unlock_bh(&host->lock);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
1989
c91eab4b
TA
1990#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		/* the child whose "reg" value matches the slot id wins */
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}
2010
a70aaa64
DA
/* table mapping device-tree boolean properties to per-slot quirk flags */
static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};

/*
 * Collect the quirk flags declared in the slot's device-tree node;
 * returns 0 when the node is absent or declares none.
 */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
			quirks |= of_slot_quirks[idx].id;

	return quirks;
}
2034
55a6ceb2
DA
/* find the write protect gpio for a given slot; or -1 if none specified */
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int gpio;

	if (!np)
		return -EINVAL;

	gpio = of_get_named_gpio(np, "wp-gpios", 0);

	/* Having a missing entry is valid; return silently */
	if (!gpio_is_valid(gpio))
		return -EINVAL;

	/* NOTE: failures return -EINVAL, which callers treat as "no gpio" */
	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
		dev_warn(dev, "gpio [%d] request failed\n", gpio);
		return -EINVAL;
	}

	return gpio;
}
bf626e55 2057
/* find the cd gpio for a given slot */
static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
					struct mmc_host *mmc)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int gpio;

	if (!np)
		return;

	gpio = of_get_named_gpio(np, "cd-gpios", 0);

	/* Having a missing entry is valid; return silently */
	if (!gpio_is_valid(gpio))
		return;

	/* slot-gpio registers the CD gpio with the mmc core for us */
	if (mmc_gpio_request_cd(mmc, gpio, 0))
		dev_warn(dev, "gpio [%d] request failed\n", gpio);
}
c91eab4b 2077#else /* CONFIG_OF */
a70aaa64
DA
/* Stub variants used when the kernel is built without device tree support. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	return -EINVAL;
}
static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
					struct mmc_host *mmc)
{
	return;
}
c91eab4b
TA
2095#endif /* CONFIG_OF */
2096
36c179a9 2097static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
f95f3850
WN
2098{
2099 struct mmc_host *mmc;
2100 struct dw_mci_slot *slot;
e95baf13 2101 const struct dw_mci_drv_data *drv_data = host->drv_data;
800d78bf 2102 int ctrl_id, ret;
1f44a2a5 2103 u32 freq[2];
f95f3850 2104
4a90920c 2105 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
f95f3850
WN
2106 if (!mmc)
2107 return -ENOMEM;
2108
2109 slot = mmc_priv(mmc);
2110 slot->id = id;
2111 slot->mmc = mmc;
2112 slot->host = host;
c91eab4b 2113 host->slot[id] = slot;
f95f3850 2114
a70aaa64
DA
2115 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2116
f95f3850 2117 mmc->ops = &dw_mci_ops;
1f44a2a5
SJ
2118 if (of_property_read_u32_array(host->dev->of_node,
2119 "clock-freq-min-max", freq, 2)) {
2120 mmc->f_min = DW_MCI_FREQ_MIN;
2121 mmc->f_max = DW_MCI_FREQ_MAX;
2122 } else {
2123 mmc->f_min = freq[0];
2124 mmc->f_max = freq[1];
2125 }
f95f3850 2126
907abd51 2127 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
f95f3850 2128
fc3d7720
JC
2129 if (host->pdata->caps)
2130 mmc->caps = host->pdata->caps;
fc3d7720 2131
ab269128
AK
2132 if (host->pdata->pm_caps)
2133 mmc->pm_caps = host->pdata->pm_caps;
2134
800d78bf
TA
2135 if (host->dev->of_node) {
2136 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2137 if (ctrl_id < 0)
2138 ctrl_id = 0;
2139 } else {
2140 ctrl_id = to_platform_device(host->dev)->id;
2141 }
cb27a843
JH
2142 if (drv_data && drv_data->caps)
2143 mmc->caps |= drv_data->caps[ctrl_id];
800d78bf 2144
4f408cc6
SJ
2145 if (host->pdata->caps2)
2146 mmc->caps2 = host->pdata->caps2;
4f408cc6 2147
d8a4fb0e 2148 mmc_of_parse(mmc);
f95f3850 2149
f95f3850
WN
2150 if (host->pdata->blk_settings) {
2151 mmc->max_segs = host->pdata->blk_settings->max_segs;
2152 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2153 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2154 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2155 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2156 } else {
2157 /* Useful defaults if platform data is unset. */
a39e5746
JC
2158#ifdef CONFIG_MMC_DW_IDMAC
2159 mmc->max_segs = host->ring_size;
2160 mmc->max_blk_size = 65536;
2161 mmc->max_blk_count = host->ring_size;
2162 mmc->max_seg_size = 0x1000;
2163 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2164#else
f95f3850
WN
2165 mmc->max_segs = 64;
2166 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2167 mmc->max_blk_count = 512;
2168 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2169 mmc->max_seg_size = mmc->max_req_size;
f95f3850 2170#endif /* CONFIG_MMC_DW_IDMAC */
a39e5746 2171 }
f95f3850 2172
55a6ceb2 2173 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
bf626e55 2174 dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
55a6ceb2 2175
0cea529d
JC
2176 ret = mmc_add_host(mmc);
2177 if (ret)
2178 goto err_setup_bus;
f95f3850
WN
2179
2180#if defined(CONFIG_DEBUG_FS)
2181 dw_mci_init_debugfs(slot);
2182#endif
2183
2184 /* Card initially undetected */
2185 slot->last_detect_state = 0;
2186
2187 return 0;
800d78bf
TA
2188
2189err_setup_bus:
2190 mmc_free_host(mmc);
2191 return -EINVAL;
f95f3850
WN
2192}
2193
2194static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2195{
f95f3850
WN
2196 /* Debugfs stuff is cleaned up by mmc core */
2197 mmc_remove_host(slot->mmc);
2198 slot->host->slot[id] = NULL;
2199 mmc_free_host(slot->mmc);
2200}
2201
2202static void dw_mci_init_dma(struct dw_mci *host)
2203{
2204 /* Alloc memory for sg translation */
780f22af 2205 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
f95f3850
WN
2206 &host->sg_dma, GFP_KERNEL);
2207 if (!host->sg_cpu) {
4a90920c 2208 dev_err(host->dev, "%s: could not alloc DMA memory\n",
f95f3850
WN
2209 __func__);
2210 goto no_dma;
2211 }
2212
2213 /* Determine which DMA interface to use */
2214#ifdef CONFIG_MMC_DW_IDMAC
2215 host->dma_ops = &dw_mci_idmac_ops;
00956ea3 2216 dev_info(host->dev, "Using internal DMA controller.\n");
f95f3850
WN
2217#endif
2218
2219 if (!host->dma_ops)
2220 goto no_dma;
2221
e1631f98
JC
2222 if (host->dma_ops->init && host->dma_ops->start &&
2223 host->dma_ops->stop && host->dma_ops->cleanup) {
f95f3850 2224 if (host->dma_ops->init(host)) {
4a90920c 2225 dev_err(host->dev, "%s: Unable to initialize "
f95f3850
WN
2226 "DMA Controller.\n", __func__);
2227 goto no_dma;
2228 }
2229 } else {
4a90920c 2230 dev_err(host->dev, "DMA initialization not found.\n");
f95f3850
WN
2231 goto no_dma;
2232 }
2233
2234 host->use_dma = 1;
2235 return;
2236
2237no_dma:
4a90920c 2238 dev_info(host->dev, "Using PIO mode.\n");
f95f3850
WN
2239 host->use_dma = 0;
2240 return;
2241}
2242
31bff450 2243static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
f95f3850
WN
2244{
2245 unsigned long timeout = jiffies + msecs_to_jiffies(500);
31bff450 2246 u32 ctrl;
f95f3850 2247
31bff450
SJ
2248 ctrl = mci_readl(host, CTRL);
2249 ctrl |= reset;
2250 mci_writel(host, CTRL, ctrl);
f95f3850
WN
2251
2252 /* wait till resets clear */
2253 do {
2254 ctrl = mci_readl(host, CTRL);
31bff450 2255 if (!(ctrl & reset))
f95f3850
WN
2256 return true;
2257 } while (time_before(jiffies, timeout));
2258
31bff450
SJ
2259 dev_err(host->dev,
2260 "Timeout resetting block (ctrl reset %#x)\n",
2261 ctrl & reset);
f95f3850
WN
2262
2263 return false;
2264}
2265
31bff450
SJ
/* Reset only the data FIFO, dropping any in-flight scatter-gather state. */
static inline bool dw_mci_fifo_reset(struct dw_mci *host)
{
	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
}
2279
2280static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2281{
2282 return dw_mci_ctrl_reset(host,
2283 SDMMC_CTRL_FIFO_RESET |
2284 SDMMC_CTRL_RESET |
2285 SDMMC_CTRL_DMA_RESET);
2286}
2287
c91eab4b
TA
2288#ifdef CONFIG_OF
/* Table mapping DT boolean properties to DW_MCI_QUIRK_* flags. */
static struct dw_mci_of_quirks {
	char *quirk;	/* device-tree property name */
	int id;		/* quirk flag OR-ed into pdata->quirks */
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};
2298
2299static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2300{
2301 struct dw_mci_board *pdata;
2302 struct device *dev = host->dev;
2303 struct device_node *np = dev->of_node;
e95baf13 2304 const struct dw_mci_drv_data *drv_data = host->drv_data;
800d78bf 2305 int idx, ret;
3c6d89ea 2306 u32 clock_frequency;
c91eab4b
TA
2307
2308 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2309 if (!pdata) {
2310 dev_err(dev, "could not allocate memory for pdata\n");
2311 return ERR_PTR(-ENOMEM);
2312 }
2313
2314 /* find out number of slots supported */
2315 if (of_property_read_u32(dev->of_node, "num-slots",
2316 &pdata->num_slots)) {
2317 dev_info(dev, "num-slots property not found, "
2318 "assuming 1 slot is available\n");
2319 pdata->num_slots = 1;
2320 }
2321
2322 /* get quirks */
2323 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2324 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2325 pdata->quirks |= of_quirks[idx].id;
2326
2327 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2328 dev_info(dev, "fifo-depth property not found, using "
2329 "value of FIFOTH register as default\n");
2330
2331 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2332
3c6d89ea
DA
2333 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2334 pdata->bus_hz = clock_frequency;
2335
cb27a843
JH
2336 if (drv_data && drv_data->parse_dt) {
2337 ret = drv_data->parse_dt(host);
800d78bf
TA
2338 if (ret)
2339 return ERR_PTR(ret);
2340 }
2341
10b49841
SJ
2342 if (of_find_property(np, "supports-highspeed", NULL))
2343 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2344
c91eab4b
TA
2345 return pdata;
2346}
2347
2348#else /* CONFIG_OF */
2349static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2350{
2351 return ERR_PTR(-EINVAL);
2352}
2353#endif /* CONFIG_OF */
2354
62ca8034 2355int dw_mci_probe(struct dw_mci *host)
f95f3850 2356{
e95baf13 2357 const struct dw_mci_drv_data *drv_data = host->drv_data;
62ca8034 2358 int width, i, ret = 0;
f95f3850 2359 u32 fifo_size;
1c2215b7 2360 int init_slots = 0;
f95f3850 2361
c91eab4b
TA
2362 if (!host->pdata) {
2363 host->pdata = dw_mci_parse_dt(host);
2364 if (IS_ERR(host->pdata)) {
2365 dev_err(host->dev, "platform data not available\n");
2366 return -EINVAL;
2367 }
f95f3850
WN
2368 }
2369
907abd51 2370 if (host->pdata->num_slots > 1) {
4a90920c 2371 dev_err(host->dev,
907abd51 2372 "Platform data must supply num_slots.\n");
62ca8034 2373 return -ENODEV;
f95f3850
WN
2374 }
2375
780f22af 2376 host->biu_clk = devm_clk_get(host->dev, "biu");
f90a0612
TA
2377 if (IS_ERR(host->biu_clk)) {
2378 dev_dbg(host->dev, "biu clock not available\n");
2379 } else {
2380 ret = clk_prepare_enable(host->biu_clk);
2381 if (ret) {
2382 dev_err(host->dev, "failed to enable biu clock\n");
f90a0612
TA
2383 return ret;
2384 }
2385 }
2386
780f22af 2387 host->ciu_clk = devm_clk_get(host->dev, "ciu");
f90a0612
TA
2388 if (IS_ERR(host->ciu_clk)) {
2389 dev_dbg(host->dev, "ciu clock not available\n");
3c6d89ea 2390 host->bus_hz = host->pdata->bus_hz;
f90a0612
TA
2391 } else {
2392 ret = clk_prepare_enable(host->ciu_clk);
2393 if (ret) {
2394 dev_err(host->dev, "failed to enable ciu clock\n");
f90a0612
TA
2395 goto err_clk_biu;
2396 }
f90a0612 2397
3c6d89ea
DA
2398 if (host->pdata->bus_hz) {
2399 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2400 if (ret)
2401 dev_warn(host->dev,
612de4c1 2402 "Unable to set bus rate to %uHz\n",
3c6d89ea
DA
2403 host->pdata->bus_hz);
2404 }
f90a0612 2405 host->bus_hz = clk_get_rate(host->ciu_clk);
3c6d89ea 2406 }
f90a0612 2407
612de4c1
JC
2408 if (!host->bus_hz) {
2409 dev_err(host->dev,
2410 "Platform data must supply bus speed\n");
2411 ret = -ENODEV;
2412 goto err_clk_ciu;
2413 }
2414
002f0d5c
YK
2415 if (drv_data && drv_data->init) {
2416 ret = drv_data->init(host);
2417 if (ret) {
2418 dev_err(host->dev,
2419 "implementation specific init failed\n");
2420 goto err_clk_ciu;
2421 }
2422 }
2423
cb27a843
JH
2424 if (drv_data && drv_data->setup_clock) {
2425 ret = drv_data->setup_clock(host);
800d78bf
TA
2426 if (ret) {
2427 dev_err(host->dev,
2428 "implementation specific clock setup failed\n");
2429 goto err_clk_ciu;
2430 }
2431 }
2432
a55d6ff0 2433 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
870556a3
DA
2434 if (IS_ERR(host->vmmc)) {
2435 ret = PTR_ERR(host->vmmc);
2436 if (ret == -EPROBE_DEFER)
2437 goto err_clk_ciu;
2438
2439 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2440 host->vmmc = NULL;
2441 } else {
2442 ret = regulator_enable(host->vmmc);
2443 if (ret) {
2444 if (ret != -EPROBE_DEFER)
2445 dev_err(host->dev,
2446 "regulator_enable fail: %d\n", ret);
2447 goto err_clk_ciu;
2448 }
2449 }
2450
62ca8034 2451 host->quirks = host->pdata->quirks;
f95f3850
WN
2452
2453 spin_lock_init(&host->lock);
2454 INIT_LIST_HEAD(&host->queue);
2455
f95f3850
WN
2456 /*
2457 * Get the host data width - this assumes that HCON has been set with
2458 * the correct values.
2459 */
2460 i = (mci_readl(host, HCON) >> 7) & 0x7;
2461 if (!i) {
2462 host->push_data = dw_mci_push_data16;
2463 host->pull_data = dw_mci_pull_data16;
2464 width = 16;
2465 host->data_shift = 1;
2466 } else if (i == 2) {
2467 host->push_data = dw_mci_push_data64;
2468 host->pull_data = dw_mci_pull_data64;
2469 width = 64;
2470 host->data_shift = 3;
2471 } else {
2472 /* Check for a reserved value, and warn if it is */
2473 WARN((i != 1),
2474 "HCON reports a reserved host data width!\n"
2475 "Defaulting to 32-bit access.\n");
2476 host->push_data = dw_mci_push_data32;
2477 host->pull_data = dw_mci_pull_data32;
2478 width = 32;
2479 host->data_shift = 2;
2480 }
2481
2482 /* Reset all blocks */
31bff450 2483 if (!dw_mci_ctrl_all_reset(host))
141a712a
SJ
2484 return -ENODEV;
2485
2486 host->dma_ops = host->pdata->dma_ops;
2487 dw_mci_init_dma(host);
f95f3850
WN
2488
2489 /* Clear the interrupts for the host controller */
2490 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2491 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2492
2493 /* Put in max timeout */
2494 mci_writel(host, TMOUT, 0xFFFFFFFF);
2495
2496 /*
2497 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2498 * Tx Mark = fifo_size / 2 DMA Size = 8
2499 */
b86d8253
JH
2500 if (!host->pdata->fifo_depth) {
2501 /*
2502 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2503 * have been overwritten by the bootloader, just like we're
2504 * about to do, so if you know the value for your hardware, you
2505 * should put it in the platform data.
2506 */
2507 fifo_size = mci_readl(host, FIFOTH);
8234e869 2508 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
b86d8253
JH
2509 } else {
2510 fifo_size = host->pdata->fifo_depth;
2511 }
2512 host->fifo_depth = fifo_size;
52426899
SJ
2513 host->fifoth_val =
2514 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
e61cf118 2515 mci_writel(host, FIFOTH, host->fifoth_val);
f95f3850
WN
2516
2517 /* disable clock to CIU */
2518 mci_writel(host, CLKENA, 0);
2519 mci_writel(host, CLKSRC, 0);
2520
63008768
JH
2521 /*
2522 * In 2.40a spec, Data offset is changed.
2523 * Need to check the version-id and set data-offset for DATA register.
2524 */
2525 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2526 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2527
2528 if (host->verid < DW_MMC_240A)
2529 host->data_offset = DATA_OFFSET;
2530 else
2531 host->data_offset = DATA_240A_OFFSET;
2532
f95f3850 2533 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
95dcc2cb 2534 host->card_workqueue = alloc_workqueue("dw-mci-card",
59ff3eb6 2535 WQ_MEM_RECLAIM, 1);
ef7aef9a
WY
2536 if (!host->card_workqueue) {
2537 ret = -ENOMEM;
1791b13e 2538 goto err_dmaunmap;
ef7aef9a 2539 }
1791b13e 2540 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
780f22af
SJ
2541 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2542 host->irq_flags, "dw-mci", host);
f95f3850 2543 if (ret)
1791b13e 2544 goto err_workqueue;
f95f3850 2545
f95f3850
WN
2546 if (host->pdata->num_slots)
2547 host->num_slots = host->pdata->num_slots;
2548 else
2549 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2550
2da1d7f2
YC
2551 /*
2552 * Enable interrupts for command done, data over, data empty, card det,
2553 * receive ready and error such as transmit, receive timeout, crc error
2554 */
2555 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2556 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2557 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2558 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2559 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2560
2561 dev_info(host->dev, "DW MMC controller at irq %d, "
2562 "%d bit host data width, "
2563 "%u deep fifo\n",
2564 host->irq, width, fifo_size);
2565
f95f3850
WN
2566 /* We need at least one slot to succeed */
2567 for (i = 0; i < host->num_slots; i++) {
2568 ret = dw_mci_init_slot(host, i);
1c2215b7
TA
2569 if (ret)
2570 dev_dbg(host->dev, "slot %d init failed\n", i);
2571 else
2572 init_slots++;
2573 }
2574
2575 if (init_slots) {
2576 dev_info(host->dev, "%d slots initialized\n", init_slots);
2577 } else {
2578 dev_dbg(host->dev, "attempted to initialize %d slots, "
2579 "but failed on all\n", host->num_slots);
780f22af 2580 goto err_workqueue;
f95f3850
WN
2581 }
2582
f95f3850 2583 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4a90920c 2584 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
f95f3850
WN
2585
2586 return 0;
2587
1791b13e 2588err_workqueue:
95dcc2cb 2589 destroy_workqueue(host->card_workqueue);
1791b13e 2590
f95f3850
WN
2591err_dmaunmap:
2592 if (host->use_dma && host->dma_ops->exit)
2593 host->dma_ops->exit(host);
780f22af 2594 if (host->vmmc)
c07946a3 2595 regulator_disable(host->vmmc);
f90a0612
TA
2596
2597err_clk_ciu:
780f22af 2598 if (!IS_ERR(host->ciu_clk))
f90a0612 2599 clk_disable_unprepare(host->ciu_clk);
780f22af 2600
f90a0612 2601err_clk_biu:
780f22af 2602 if (!IS_ERR(host->biu_clk))
f90a0612 2603 clk_disable_unprepare(host->biu_clk);
780f22af 2604
f95f3850
WN
2605 return ret;
2606}
62ca8034 2607EXPORT_SYMBOL(dw_mci_probe);
f95f3850 2608
62ca8034 2609void dw_mci_remove(struct dw_mci *host)
f95f3850 2610{
f95f3850
WN
2611 int i;
2612
2613 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2614 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2615
f95f3850 2616 for (i = 0; i < host->num_slots; i++) {
4a90920c 2617 dev_dbg(host->dev, "remove slot %d\n", i);
f95f3850
WN
2618 if (host->slot[i])
2619 dw_mci_cleanup_slot(host->slot[i], i);
2620 }
2621
2622 /* disable clock to CIU */
2623 mci_writel(host, CLKENA, 0);
2624 mci_writel(host, CLKSRC, 0);
2625
95dcc2cb 2626 destroy_workqueue(host->card_workqueue);
f95f3850
WN
2627
2628 if (host->use_dma && host->dma_ops->exit)
2629 host->dma_ops->exit(host);
2630
780f22af 2631 if (host->vmmc)
c07946a3 2632 regulator_disable(host->vmmc);
c07946a3 2633
f90a0612
TA
2634 if (!IS_ERR(host->ciu_clk))
2635 clk_disable_unprepare(host->ciu_clk);
780f22af 2636
f90a0612
TA
2637 if (!IS_ERR(host->biu_clk))
2638 clk_disable_unprepare(host->biu_clk);
f95f3850 2639}
62ca8034
SH
2640EXPORT_SYMBOL(dw_mci_remove);
2641
2642
f95f3850 2643
6fe8890d 2644#ifdef CONFIG_PM_SLEEP
f95f3850
WN
2645/*
2646 * TODO: we should probably disable the clock to the card in the suspend path.
2647 */
62ca8034 2648int dw_mci_suspend(struct dw_mci *host)
f95f3850 2649{
c07946a3
JC
2650 if (host->vmmc)
2651 regulator_disable(host->vmmc);
2652
f95f3850
WN
2653 return 0;
2654}
62ca8034 2655EXPORT_SYMBOL(dw_mci_suspend);
f95f3850 2656
/*
 * System-resume hook: re-enable vmmc, reset the IP and re-program every
 * register the controller lost across suspend (FIFO thresholds, timeout,
 * interrupt mask), then restore bus state for keep-power slots.
 * Returns 0 on success or a negative errno.
 */
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc) {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			dev_err(host->dev,
				"failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	/* The controller must come out of reset before any register writes. */
	if (!dw_mci_ctrl_all_reset(host)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/* Clear stale status, then re-enable the same interrupt set as probe. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	/* Slots that kept power need their ios and bus clock restored. */
	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}
	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
6fe8890d
JC
2706#endif /* CONFIG_PM_SLEEP */
2707
f95f3850
WN
2708static int __init dw_mci_init(void)
2709{
8e1c4e4d 2710 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
62ca8034 2711 return 0;
f95f3850
WN
2712}
2713
/* Module exit point: nothing to undo — teardown happens in dw_mci_remove(). */
static void __exit dw_mci_exit(void)
{
}
2717
/*
 * This core module registers no device driver itself; dw_mci_probe()/
 * dw_mci_remove() are exported above for the platform/PCI glue modules.
 */
module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");