]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/mmc/host/dw_mmc.c
mmc: dw_mmc: adjust the fifoth with block size
[mirror_ubuntu-bionic-kernel.git] / drivers / mmc / host / dw_mmc.c
CommitLineData
f95f3850
WN
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
f95f3850
WN
25#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/mmc.h>
32#include <linux/mmc/dw_mmc.h>
33#include <linux/bitops.h>
c07946a3 34#include <linux/regulator/consumer.h>
1791b13e 35#include <linux/workqueue.h>
c91eab4b 36#include <linux/of.h>
55a6ceb2 37#include <linux/of_gpio.h>
f95f3850
WN
38
39#include "dw_mmc.h"
40
41/* Common flag combinations */
3f7eec62 42#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
f95f3850
WN
43 SDMMC_INT_HTO | SDMMC_INT_SBE | \
44 SDMMC_INT_EBE)
45#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
46 SDMMC_INT_RESP_ERR)
47#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
48 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
49#define DW_MCI_SEND_STATUS 1
50#define DW_MCI_RECV_STATUS 2
51#define DW_MCI_DMA_THRESHOLD 16
52
1f44a2a5
SJ
53#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
54#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
55
#ifdef CONFIG_MMC_DW_IDMAC
/* All internal-DMAC interrupt status bits, used to ack/clear IDSTS */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

/*
 * Hardware DMA descriptor for the Synopsys internal DMA controller
 * (IDMAC).  Laid out to match the controller's expected in-memory
 * format; the des0 bit definitions live alongside the field they apply to.
 */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
/* Buffer-1 size lives in des1 bits [12:0]; bits [25:13] (buffer 2) kept */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
81
/* Tuning block pattern sent for CMD19/CMD21 on a 4-bit bus (per spec) */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
f95f3850 92
/* Tuning block pattern sent for CMD21 on an 8-bit bus (per spec) */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
111
112#if defined(CONFIG_DEBUG_FS)
113static int dw_mci_req_show(struct seq_file *s, void *v)
114{
115 struct dw_mci_slot *slot = s->private;
116 struct mmc_request *mrq;
117 struct mmc_command *cmd;
118 struct mmc_command *stop;
119 struct mmc_data *data;
120
121 /* Make sure we get a consistent snapshot */
122 spin_lock_bh(&slot->host->lock);
123 mrq = slot->mrq;
124
125 if (mrq) {
126 cmd = mrq->cmd;
127 data = mrq->data;
128 stop = mrq->stop;
129
130 if (cmd)
131 seq_printf(s,
132 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
133 cmd->opcode, cmd->arg, cmd->flags,
134 cmd->resp[0], cmd->resp[1], cmd->resp[2],
135 cmd->resp[2], cmd->error);
136 if (data)
137 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
138 data->bytes_xfered, data->blocks,
139 data->blksz, data->flags, data->error);
140 if (stop)
141 seq_printf(s,
142 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
143 stop->opcode, stop->arg, stop->flags,
144 stop->resp[0], stop->resp[1], stop->resp[2],
145 stop->resp[2], stop->error);
146 }
147
148 spin_unlock_bh(&slot->host->lock);
149
150 return 0;
151}
152
/* debugfs open: bind dw_mci_req_show to the per-slot "req" file */
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}
157
/* File operations for the per-slot debugfs "req" entry */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
165
166static int dw_mci_regs_show(struct seq_file *s, void *v)
167{
168 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
169 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
170 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
171 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
172 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
173 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
174
175 return 0;
176}
177
/* debugfs open: bind dw_mci_regs_show to the "regs" file */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}
182
/* File operations for the debugfs "regs" entry */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
190
/*
 * Create the per-slot debugfs entries (regs, req, state, pending/completed
 * event masks) under the mmc host's debugfs root.  Best-effort: on any
 * failure a message is logged and the remaining entries are skipped.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	/* state/event words are exposed raw; cast assumes 32-bit view is OK */
	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
231#endif /* defined(CONFIG_DEBUG_FS) */
232
/* Program the data/response timeout register to its maximum value */
static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}
238
/*
 * Translate an mmc_command into the controller's CMD register value:
 * opcode plus response/data/stop flag bits.  Gives the platform driver
 * (drv_data->prepare_command) a chance to tweak the result.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	/* STOP may be issued while data is in flight; everything else waits */
	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
	else
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* Platform-specific fixups, if the variant driver provides any */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
278
/*
 * Kick off a command: latch it as host->cmd, write the argument, then
 * the CMD register with the START bit.  The wmb() orders the argument
 * write before the start bit reaches the controller.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
292
/* Issue the pre-prepared stop command (host->stop_cmdr) for a transfer */
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
}
297
/* DMA interface functions */

/*
 * Abort an in-flight transfer.  In DMA mode, stop and clean up the DMA
 * engine; in PIO mode the interrupt handler already stopped the transfer,
 * so just mark the transfer phase complete for the state machine.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	} else {
		/* Data transfer was stopped by the interrupt handler */
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
	}
}
309
9aa51408
SJ
310static int dw_mci_get_dma_dir(struct mmc_data *data)
311{
312 if (data->flags & MMC_DATA_WRITE)
313 return DMA_TO_DEVICE;
314 else
315 return DMA_FROM_DEVICE;
316}
317
9beee912 318#ifdef CONFIG_MMC_DW_IDMAC
f95f3850
WN
/*
 * Unmap the scatterlist of the current data transfer, unless the mapping
 * is owned by the pre_req/post_req path (host_cookie non-zero), in which
 * case dw_mci_post_req() will unmap it instead.
 */
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}
330
/* Disable and reset the internal DMAC, then halt its descriptor engine */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	mci_writel(host, BMOD, temp);
}
346
/*
 * IDMAC transfer-complete handler: clean up the mapping and, if the data
 * is still valid, flag XFER_COMPLETE and poke the state-machine tasklet.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
364
/*
 * Fill the IDMAC descriptor ring from the (already DMA-mapped)
 * scatterlist: one descriptor per sg entry, first/last descriptors
 * specially flagged, interrupts suppressed on all but the last.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	/* Ensure descriptor writes are visible before the DMA engine starts */
	wmb();
}
396
/*
 * Start an IDMAC transfer: build the descriptor ring, select the IDMAC
 * data path in CTRL, enable the engine in BMOD, then write the poll-demand
 * register to make it fetch descriptors.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* Order the CTRL update before enabling the engine */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
418
/*
 * One-time IDMAC setup: forward-link a page-sized ring of descriptors,
 * terminate the ring, soft-reset the engine, unmask only TX/RX-complete
 * interrupts and point the controller at the descriptor base.
 * Returns 0 (cannot currently fail).
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
446
/* DMA-ops vtable wiring the generic DMA hooks to the IDMAC back end */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
454#endif /* CONFIG_MMC_DW_IDMAC */
455
/*
 * DMA-map a data transfer's scatterlist if it is DMA-able.
 *
 * @next: true when called from the pre_req (asynchronous prepare) path;
 *        in that case the sg_len is cached in data->host_cookie so the
 *        request path can reuse the mapping.
 *
 * Returns the number of mapped sg entries, or -EINVAL when the transfer
 * must fall back to PIO (too short, misaligned, or mapping failed).
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	/* Already mapped by pre_req? Reuse the cached sg_len. */
	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
494
/*
 * mmc_host_ops.pre_req: map the next request's data ahead of time so the
 * mapping overlaps with the current transfer.  On failure (or a stale
 * cookie) the cookie is cleared and the request path maps it itself.
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* A non-zero cookie here is unexpected; reset it and bail */
	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}
513
/*
 * mmc_host_ops.post_req: undo a mapping made by pre_req (cookie set) and
 * always clear the cookie so the next request starts clean.
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}
531
/*
 * Tune the FIFOTH register (burst size MSIZE plus RX/TX watermarks) to
 * the current block size so DMA bursts divide evenly into both the block
 * and the TX watermark headroom.  Only meaningful with the internal DMAC.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	/* Burst sizes the controller supports, indexed by MSIZE encoding */
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	/* Pick the largest burst that divides both constraints evenly */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
572
/*
 * Try to start the data phase via DMA.  Returns 0 on success; a negative
 * value tells the caller to fall back to PIO (no DMA channel, or the
 * scatterlist is not DMA-able).
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		/* Leave the DMA engine quiescent before falling back to PIO */
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
619
/*
 * Set up the data phase of a request: record direction, then either hand
 * the transfer to DMA or fall back to PIO (sg_miter + RX/TX interrupts).
 * prev_blksz tracks whether FIFOTH needs reprogramming next time.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback: walk the scatterlist from interrupt context */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
672
/*
 * Synchronously issue a controller command (e.g. clock update) and poll
 * up to 500ms for the START bit to clear.  Logs an error on timeout but
 * does not propagate it — callers treat this as best-effort.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
692
/*
 * Program the card clock (divider, enable, optional low-power gating)
 * and the bus width for a slot.  Follows the controller's mandated
 * sequence: disable clock -> update CIU -> set divider -> update CIU ->
 * enable clock -> update CIU.  @force_clkinit forces reprogramming even
 * when the requested rate equals the current one.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		/* Rate 0: just gate the clock off */
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* Only log when the effective rate actually changes */
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* remember the requested clock, adjusted by the divider */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
756
/*
 * Start one command of a request on the given slot: select the slot,
 * reset per-request state, program byte/block counts for data, submit the
 * data phase, then fire the command.  Also pre-computes the stop-command
 * register value so the ISR can issue it without extra work.
 * Caller must hold host->lock.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		/* Data-path setup must be visible before the command starts */
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
799
/*
 * Begin a queued request, preferring the SET_BLOCK_COUNT (sbc) command
 * when present so the main command follows it.
 */
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}
809
/* must be called with host->lock held */

/*
 * Attach a request to a slot and either start it immediately (host idle)
 * or append the slot to the host's pending queue.
 */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}
826
/*
 * mmc_host_ops.request: entry point from the MMC core.  Fails fast with
 * -ENOMEDIUM if no card is present; otherwise queues/starts the request
 * under the host lock.
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}
852
/*
 * mmc_host_ops.set_ios: apply bus width, DDR timing, clock rate and slot
 * power changes requested by the MMC core, delegating variant-specific
 * adjustments to drv_data->set_ios.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set: per-slot DDR bit lives in UHS_REG bits [16+id] */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		/* Power up slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}
915
/*
 * mmc_host_ops.get_ro: report write-protect state.  Resolution order:
 * quirk override -> platform callback -> WP GPIO -> controller WRTPRT
 * register.  Returns 1 when read-only, 0 when read-write.
 */
static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
		read_only = 0;
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}
938
/*
 * mmc_host_ops.get_cd: report card presence.  Resolution order:
 * broken-CD quirk (always present) -> platform callback -> CDETECT
 * register (active-low).  Returns 1 when a card is present.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	if (present)
		dev_dbg(&mmc->class_dev, "card is present\n");
	else
		dev_dbg(&mmc->class_dev, "card is not present\n");

	return present;
}
961
/*
 * Disable lower power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		/* Clock-register changes must be pushed to the CIU */
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}
985
/*
 * mmc_host_ops.enable_sdio_irq: mask/unmask the per-slot SDIO interrupt.
 * Enabling also turns off clock low-power gating, since a stopped card
 * clock would prevent SDIO interrupt delivery.
 */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled. This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}
1010
/*
 * mmc_host_ops.execute_tuning: pick the tuning block pattern matching the
 * opcode and bus width, then delegate the actual tuning to the variant
 * driver.  Returns -ENOSYS when the variant provides no tuning hook.
 */
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;
	int err = -ENOSYS;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		} else {
			return -EINVAL;
		}
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else {
		dev_err(host->dev,
			"Undefined command(%d) for tuning\n", opcode);
		return -EINVAL;
	}

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
	return err;
}
1042
/* Host operations exposed to the MMC core */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};
1053
/*
 * Finish the current request and start the next queued slot, if any.
 * Drops host->lock around mmc_request_done() (which may re-enter the
 * driver) and re-acquires it before returning, as the sparse
 * annotations document.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1082
/*
 * Harvest the response registers for a completed command and translate the
 * latched interrupt status into cmd->error.  On error with pending data,
 * any in-flight DMA is stopped.
 */
static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit responses arrive LSW-first in RESP0..3 */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Map interrupt status bits onto the core's error codes */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);

		if (cmd->data) {
			/* The data phase will never run; abandon it */
			dw_mci_stop_dma(host);
			host->data = NULL;
		}
	}
}
1124
/*
 * Bottom-half state machine.  Drives a request through
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP based on event
 * bits latched by the IRQ handler, looping until no further state advance
 * is possible.  The case fallthroughs are intentional: once an event is
 * consumed the next phase is checked immediately in the same pass.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	u32 status, ctrl;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			dw_mci_command_complete(host, cmd);
			if (cmd == host->mrq->sbc && !cmd->error) {
				/* CMD23 done - now issue the real command */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						      host->mrq->cmd);
				goto unlock;
			}

			if (!host->mrq->data || cmd->error) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop)
					send_stop_cmd(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			status = host->data_status;

			if (status & DW_MCI_DATA_ERROR_FLAGS) {
				if (status & SDMMC_INT_DRTO) {
					data->error = -ETIMEDOUT;
				} else if (status & SDMMC_INT_DCRC) {
					data->error = -EILSEQ;
				} else if (status & SDMMC_INT_EBE &&
					   host->dir_status ==
							DW_MCI_SEND_STATUS) {
					/*
					 * No data CRC status was returned.
					 * The number of bytes transferred will
					 * be exaggerated in PIO mode.
					 */
					data->bytes_xfered = 0;
					data->error = -ETIMEDOUT;
				} else {
					dev_err(host->dev,
						"data FIFO error "
						"(status=%08x)\n",
						status);
					data->error = -EIO;
				}
				/*
				 * After an error, there may be data lingering
				 * in the FIFO, so reset it - doing so
				 * generates a block interrupt, hence setting
				 * the scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;
				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);
			} else {
				data->bytes_xfered = data->blocks * data->blksz;
				data->error = 0;
			}

			if (!data->stop) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			if (host->mrq->sbc && !data->error) {
				/* CMD23 was used; the controller auto-stops */
				data->stop->error = 0;
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_STOP;
			if (!data->error)
				send_stop_cmd(host, data);
			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			host->cmd = NULL;
			dw_mci_command_complete(host, host->mrq->stop);
			dw_mci_request_end(host, host->mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the transfer to wind down before retiring */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1276
34b664a2
JH
1277/* push final bytes to part_buf, only use during push */
1278static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 1279{
34b664a2
JH
1280 memcpy((void *)&host->part_buf, buf, cnt);
1281 host->part_buf_count = cnt;
1282}
f95f3850 1283
34b664a2
JH
1284/* append bytes to part_buf, only use during push */
1285static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1286{
1287 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1288 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1289 host->part_buf_count += cnt;
1290 return cnt;
1291}
f95f3850 1292
34b664a2
JH
1293/* pull first bytes from part_buf, only use during pull */
1294static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1295{
1296 cnt = min(cnt, (int)host->part_buf_count);
1297 if (cnt) {
1298 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1299 cnt);
1300 host->part_buf_count -= cnt;
1301 host->part_buf_start += cnt;
f95f3850 1302 }
34b664a2 1303 return cnt;
f95f3850
WN
1304}
1305
34b664a2
JH
1306/* pull final bytes from the part_buf, assuming it's just been filled */
1307static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 1308{
34b664a2
JH
1309 memcpy(buf, &host->part_buf, cnt);
1310 host->part_buf_start = cnt;
1311 host->part_buf_count = (1 << host->data_shift) - cnt;
1312}
f95f3850 1313
34b664a2
JH
/*
 * PIO push for a 16-bit wide FIFO: flush any buffered partial word, stream
 * whole 16-bit words (bouncing through an aligned buffer when @buf is
 * misaligned and the arch lacks efficient unaligned access), and buffer a
 * trailing odd byte - flushing it only at the very end of the transfer.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			/* part_buf now holds a complete word - write it */
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
f95f3850 1364
34b664a2
JH
/*
 * PIO pull for a 16-bit wide FIFO: read whole words into @buf (via an
 * aligned bounce buffer when needed), then read one extra word to satisfy
 * a trailing odd-byte request, leaving the unused bytes in part_buf.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Final odd byte: fetch one word, keep the leftover */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1396
/*
 * PIO push for a 32-bit wide FIFO; mirrors dw_mci_push_data16 with a 4-byte
 * word size: flush the buffered partial word, stream aligned 32-bit words,
 * buffer trailing bytes and flush them only at end of transfer.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			/* part_buf now holds a complete word - write it */
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
1447
/*
 * PIO pull for a 32-bit wide FIFO; mirrors dw_mci_pull_data16 with a 4-byte
 * word size.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Final partial word: fetch one word, keep the leftover */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1479
/*
 * PIO push for a 64-bit wide FIFO; mirrors dw_mci_push_data16 with an
 * 8-byte word size.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			/* part_buf now holds a complete word - write it */
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
1531
/*
 * PIO pull for a 64-bit wide FIFO; mirrors dw_mci_pull_data16 with an
 * 8-byte word size.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Final partial word: fetch one word, keep the leftover */
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
f95f3850 1563
34b664a2
JH
1564static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1565{
1566 int len;
f95f3850 1567
34b664a2
JH
1568 /* get remaining partial bytes */
1569 len = dw_mci_pull_part_bytes(host, buf, cnt);
1570 if (unlikely(len == cnt))
1571 return;
1572 buf += len;
1573 cnt -= len;
1574
1575 /* get the rest of the data */
1576 host->pull_data(host, buf, cnt);
f95f3850
WN
1577}
1578
/*
 * PIO receive path.  Walks the request's scatterlist via sg_miter, draining
 * as many bytes as the FIFO currently holds into each segment, and repeats
 * while the controller keeps signalling RXDR (or, on DTO, while the FIFO is
 * non-empty).  When the scatterlist is exhausted the transfer-complete
 * event is raised for the tasklet.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* Bytes available = FIFO fill level + part_buf */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* Segment fully consumed; probe for the next one */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* Publish the data before the event bit becomes visible */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1632
/*
 * PIO transmit path, symmetric to dw_mci_read_data_pio: fills the FIFO's
 * free space from each scatterlist segment while the controller keeps
 * signalling TXDR, then raises the transfer-complete event once the
 * scatterlist is exhausted.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* Free FIFO space minus what part_buf will occupy */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* Segment fully consumed; probe for the next one */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* Publish the data before the event bit becomes visible */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1686
/*
 * Record a command-done status (first status wins) and hand completion
 * processing to the tasklet.  The smp_wmb() orders the cmd_status store
 * before the event bit that the tasklet tests.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1697
/*
 * Top-half interrupt handler.  Acknowledges each pending source in RINTSTS,
 * latches command/data status for the tasklet, services PIO FIFO watermarks
 * inline, and forwards card-detect, SDIO and internal-DMA interrupts to
 * their respective handlers.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			/* order status store before the event bit */
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* drain whatever is left in the FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			/* card detect handled in process context */
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
1791
/*
 * Card-detect workqueue handler.  For each slot, loops until the observed
 * card presence matches the recorded state: on a change it fails any
 * in-flight or queued request with -ENOMEDIUM, resets the FIFO (and IDMAC)
 * on removal, and finally tells the MMC core to rescan the slot.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;
		u32 ctrl;

		present = dw_mci_get_cd(mmc);
		/* re-check after handling: the card may have bounced */
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Mark card as present if applicable */
			if (present != 0)
				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* request is actively running - fail
					 * whichever phase it reached */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						if (!mrq->stop)
							break;
						/* fall through */
					case STATE_SENDING_STOP:
						mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* request was still queued - fail it
					 * without touching the hardware */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

				/*
				 * Clear down the FIFO - doing so generates a
				 * block interrupt, hence setting the
				 * scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;

				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);

#ifdef CONFIG_MMC_DW_IDMAC
				ctrl = mci_readl(host, BMOD);
				/* Software reset of DMA */
				ctrl |= SDMMC_IDMAC_SWRESET;
				mci_writel(host, BMOD, ctrl);
#endif

			}

			spin_unlock_bh(&host->lock);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
1898
c91eab4b
TA
1899#ifdef CONFIG_OF
1900/* given a slot id, find out the device node representing that slot */
1901static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1902{
1903 struct device_node *np;
1904 const __be32 *addr;
1905 int len;
1906
1907 if (!dev || !dev->of_node)
1908 return NULL;
1909
1910 for_each_child_of_node(dev->of_node, np) {
1911 addr = of_get_property(np, "reg", &len);
1912 if (!addr || (len < sizeof(int)))
1913 continue;
1914 if (be32_to_cpup(addr) == slot)
1915 return np;
1916 }
1917 return NULL;
1918}
1919
a70aaa64
DA
/* Mapping of per-slot devicetree quirk property names to quirk flags. */
static struct dw_mci_of_slot_quirks {
	char *quirk;	/* DT property name */
	int id;		/* corresponding DW_MCI_SLOT_QUIRK_* flag */
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};

/*
 * Collect the quirk flags declared on @slot's devicetree node.
 * Returns 0 when the node is absent or declares no known quirks.
 */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
			quirks |= of_slot_quirks[idx].id;

	return quirks;
}
1943
c91eab4b
TA
1944/* find out bus-width for a given slot */
1945static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
1946{
1947 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1948 u32 bus_wd = 1;
1949
1950 if (!np)
1951 return 1;
1952
1953 if (of_property_read_u32(np, "bus-width", &bus_wd))
1954 dev_err(dev, "bus-width property not found, assuming width"
1955 " as 1\n");
1956 return bus_wd;
1957}
55a6ceb2
DA
1958
/* find the write protect gpio for a given slot; or -1 if none specified */
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int gpio;

	if (!np)
		return -EINVAL;

	gpio = of_get_named_gpio(np, "wp-gpios", 0);

	/* Having a missing entry is valid; return silently */
	if (!gpio_is_valid(gpio))
		return -EINVAL;

	/* managed request: freed automatically on device teardown */
	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
		dev_warn(dev, "gpio [%d] request failed\n", gpio);
		return -EINVAL;
	}

	return gpio;
}
c91eab4b 1981#else /* CONFIG_OF */
a70aaa64
DA
/* Stubs used when the kernel is built without CONFIG_OF. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	/* default to 1-bit bus */
	return 1;
}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	/* no DT means no write-protect gpio can be described */
	return -EINVAL;
}
1998#endif /* CONFIG_OF */
1999
/*
 * Allocate, configure and register one mmc_host for slot @id: frequency
 * limits, OCR mask, capability flags (platform data, DT and controller
 * variant), bus width, block/segment limits and the write-protect gpio.
 * Returns 0 on success or a negative errno.
 */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];
	u8 bus_width;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
	/* DT may override the default clock frequency window */
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	if (host->pdata->get_ocr)
		mmc->ocr_avail = host->pdata->get_ocr(id);
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * Start with slot power disabled, it will be enabled when a card
	 * is detected.
	 */
	if (host->pdata->setpower)
		host->pdata->setpower(id, 0);

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	/* controller index selects the per-variant capability entry */
	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	if (host->pdata->get_bus_wd)
		bus_width = host->pdata->get_bus_wd(slot->id);
	else if (host->dev->of_node)
		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
	else
		bus_width = 1;

	switch (bus_width) {
	case 8:
		mmc->caps |= MMC_CAP_8_BIT_DATA;
		/* fall through - an 8-bit bus can also run 4-bit */
	case 4:
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_setup_bus;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	return 0;

err_setup_bus:
	mmc_free_host(mmc);
	return -EINVAL;
}
2123
2124static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2125{
2126 /* Shutdown detect IRQ */
2127 if (slot->host->pdata->exit)
2128 slot->host->pdata->exit(id);
2129
2130 /* Debugfs stuff is cleaned up by mmc core */
2131 mmc_remove_host(slot->mmc);
2132 slot->host->slot[id] = NULL;
2133 mmc_free_host(slot->mmc);
2134}
2135
2136static void dw_mci_init_dma(struct dw_mci *host)
2137{
2138 /* Alloc memory for sg translation */
780f22af 2139 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
f95f3850
WN
2140 &host->sg_dma, GFP_KERNEL);
2141 if (!host->sg_cpu) {
4a90920c 2142 dev_err(host->dev, "%s: could not alloc DMA memory\n",
f95f3850
WN
2143 __func__);
2144 goto no_dma;
2145 }
2146
2147 /* Determine which DMA interface to use */
2148#ifdef CONFIG_MMC_DW_IDMAC
2149 host->dma_ops = &dw_mci_idmac_ops;
00956ea3 2150 dev_info(host->dev, "Using internal DMA controller.\n");
f95f3850
WN
2151#endif
2152
2153 if (!host->dma_ops)
2154 goto no_dma;
2155
e1631f98
JC
2156 if (host->dma_ops->init && host->dma_ops->start &&
2157 host->dma_ops->stop && host->dma_ops->cleanup) {
f95f3850 2158 if (host->dma_ops->init(host)) {
4a90920c 2159 dev_err(host->dev, "%s: Unable to initialize "
f95f3850
WN
2160 "DMA Controller.\n", __func__);
2161 goto no_dma;
2162 }
2163 } else {
4a90920c 2164 dev_err(host->dev, "DMA initialization not found.\n");
f95f3850
WN
2165 goto no_dma;
2166 }
2167
2168 host->use_dma = 1;
2169 return;
2170
2171no_dma:
4a90920c 2172 dev_info(host->dev, "Using PIO mode.\n");
f95f3850
WN
2173 host->use_dma = 0;
2174 return;
2175}
2176
2177static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2178{
2179 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2180 unsigned int ctrl;
2181
2182 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2183 SDMMC_CTRL_DMA_RESET));
2184
2185 /* wait till resets clear */
2186 do {
2187 ctrl = mci_readl(host, CTRL);
2188 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2189 SDMMC_CTRL_DMA_RESET)))
2190 return true;
2191 } while (time_before(jiffies, timeout));
2192
2193 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
2194
2195 return false;
2196}
2197
c91eab4b
TA
2198#ifdef CONFIG_OF
2199static struct dw_mci_of_quirks {
2200 char *quirk;
2201 int id;
2202} of_quirks[] = {
2203 {
c91eab4b
TA
2204 .quirk = "broken-cd",
2205 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2206 },
2207};
2208
2209static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2210{
2211 struct dw_mci_board *pdata;
2212 struct device *dev = host->dev;
2213 struct device_node *np = dev->of_node;
e95baf13 2214 const struct dw_mci_drv_data *drv_data = host->drv_data;
800d78bf 2215 int idx, ret;
3c6d89ea 2216 u32 clock_frequency;
c91eab4b
TA
2217
2218 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2219 if (!pdata) {
2220 dev_err(dev, "could not allocate memory for pdata\n");
2221 return ERR_PTR(-ENOMEM);
2222 }
2223
2224 /* find out number of slots supported */
2225 if (of_property_read_u32(dev->of_node, "num-slots",
2226 &pdata->num_slots)) {
2227 dev_info(dev, "num-slots property not found, "
2228 "assuming 1 slot is available\n");
2229 pdata->num_slots = 1;
2230 }
2231
2232 /* get quirks */
2233 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2234 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2235 pdata->quirks |= of_quirks[idx].id;
2236
2237 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2238 dev_info(dev, "fifo-depth property not found, using "
2239 "value of FIFOTH register as default\n");
2240
2241 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2242
3c6d89ea
DA
2243 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2244 pdata->bus_hz = clock_frequency;
2245
cb27a843
JH
2246 if (drv_data && drv_data->parse_dt) {
2247 ret = drv_data->parse_dt(host);
800d78bf
TA
2248 if (ret)
2249 return ERR_PTR(ret);
2250 }
2251
ab269128
AK
2252 if (of_find_property(np, "keep-power-in-suspend", NULL))
2253 pdata->pm_caps |= MMC_PM_KEEP_POWER;
2254
2255 if (of_find_property(np, "enable-sdio-wakeup", NULL))
2256 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2257
10b49841
SJ
2258 if (of_find_property(np, "supports-highspeed", NULL))
2259 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2260
5dd63f52
SJ
2261 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2262 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2263
2264 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2265 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2266
c91eab4b
TA
2267 return pdata;
2268}
2269
2270#else /* CONFIG_OF */
2271static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2272{
2273 return ERR_PTR(-EINVAL);
2274}
2275#endif /* CONFIG_OF */
2276
62ca8034 2277int dw_mci_probe(struct dw_mci *host)
f95f3850 2278{
e95baf13 2279 const struct dw_mci_drv_data *drv_data = host->drv_data;
62ca8034 2280 int width, i, ret = 0;
f95f3850 2281 u32 fifo_size;
1c2215b7 2282 int init_slots = 0;
f95f3850 2283
c91eab4b
TA
2284 if (!host->pdata) {
2285 host->pdata = dw_mci_parse_dt(host);
2286 if (IS_ERR(host->pdata)) {
2287 dev_err(host->dev, "platform data not available\n");
2288 return -EINVAL;
2289 }
f95f3850
WN
2290 }
2291
62ca8034 2292 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
4a90920c 2293 dev_err(host->dev,
f95f3850 2294 "Platform data must supply select_slot function\n");
62ca8034 2295 return -ENODEV;
f95f3850
WN
2296 }
2297
780f22af 2298 host->biu_clk = devm_clk_get(host->dev, "biu");
f90a0612
TA
2299 if (IS_ERR(host->biu_clk)) {
2300 dev_dbg(host->dev, "biu clock not available\n");
2301 } else {
2302 ret = clk_prepare_enable(host->biu_clk);
2303 if (ret) {
2304 dev_err(host->dev, "failed to enable biu clock\n");
f90a0612
TA
2305 return ret;
2306 }
2307 }
2308
780f22af 2309 host->ciu_clk = devm_clk_get(host->dev, "ciu");
f90a0612
TA
2310 if (IS_ERR(host->ciu_clk)) {
2311 dev_dbg(host->dev, "ciu clock not available\n");
3c6d89ea 2312 host->bus_hz = host->pdata->bus_hz;
f90a0612
TA
2313 } else {
2314 ret = clk_prepare_enable(host->ciu_clk);
2315 if (ret) {
2316 dev_err(host->dev, "failed to enable ciu clock\n");
f90a0612
TA
2317 goto err_clk_biu;
2318 }
f90a0612 2319
3c6d89ea
DA
2320 if (host->pdata->bus_hz) {
2321 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2322 if (ret)
2323 dev_warn(host->dev,
2324 "Unable to set bus rate to %ul\n",
2325 host->pdata->bus_hz);
2326 }
f90a0612 2327 host->bus_hz = clk_get_rate(host->ciu_clk);
3c6d89ea 2328 }
f90a0612 2329
002f0d5c
YK
2330 if (drv_data && drv_data->init) {
2331 ret = drv_data->init(host);
2332 if (ret) {
2333 dev_err(host->dev,
2334 "implementation specific init failed\n");
2335 goto err_clk_ciu;
2336 }
2337 }
2338
cb27a843
JH
2339 if (drv_data && drv_data->setup_clock) {
2340 ret = drv_data->setup_clock(host);
800d78bf
TA
2341 if (ret) {
2342 dev_err(host->dev,
2343 "implementation specific clock setup failed\n");
2344 goto err_clk_ciu;
2345 }
2346 }
2347
a55d6ff0 2348 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
870556a3
DA
2349 if (IS_ERR(host->vmmc)) {
2350 ret = PTR_ERR(host->vmmc);
2351 if (ret == -EPROBE_DEFER)
2352 goto err_clk_ciu;
2353
2354 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2355 host->vmmc = NULL;
2356 } else {
2357 ret = regulator_enable(host->vmmc);
2358 if (ret) {
2359 if (ret != -EPROBE_DEFER)
2360 dev_err(host->dev,
2361 "regulator_enable fail: %d\n", ret);
2362 goto err_clk_ciu;
2363 }
2364 }
2365
f90a0612 2366 if (!host->bus_hz) {
4a90920c 2367 dev_err(host->dev,
f95f3850 2368 "Platform data must supply bus speed\n");
f90a0612 2369 ret = -ENODEV;
870556a3 2370 goto err_regulator;
f95f3850
WN
2371 }
2372
62ca8034 2373 host->quirks = host->pdata->quirks;
f95f3850
WN
2374
2375 spin_lock_init(&host->lock);
2376 INIT_LIST_HEAD(&host->queue);
2377
f95f3850
WN
2378 /*
2379 * Get the host data width - this assumes that HCON has been set with
2380 * the correct values.
2381 */
2382 i = (mci_readl(host, HCON) >> 7) & 0x7;
2383 if (!i) {
2384 host->push_data = dw_mci_push_data16;
2385 host->pull_data = dw_mci_pull_data16;
2386 width = 16;
2387 host->data_shift = 1;
2388 } else if (i == 2) {
2389 host->push_data = dw_mci_push_data64;
2390 host->pull_data = dw_mci_pull_data64;
2391 width = 64;
2392 host->data_shift = 3;
2393 } else {
2394 /* Check for a reserved value, and warn if it is */
2395 WARN((i != 1),
2396 "HCON reports a reserved host data width!\n"
2397 "Defaulting to 32-bit access.\n");
2398 host->push_data = dw_mci_push_data32;
2399 host->pull_data = dw_mci_pull_data32;
2400 width = 32;
2401 host->data_shift = 2;
2402 }
2403
2404 /* Reset all blocks */
4a90920c 2405 if (!mci_wait_reset(host->dev, host))
141a712a
SJ
2406 return -ENODEV;
2407
2408 host->dma_ops = host->pdata->dma_ops;
2409 dw_mci_init_dma(host);
f95f3850
WN
2410
2411 /* Clear the interrupts for the host controller */
2412 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2413 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2414
2415 /* Put in max timeout */
2416 mci_writel(host, TMOUT, 0xFFFFFFFF);
2417
2418 /*
2419 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2420 * Tx Mark = fifo_size / 2 DMA Size = 8
2421 */
b86d8253
JH
2422 if (!host->pdata->fifo_depth) {
2423 /*
2424 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2425 * have been overwritten by the bootloader, just like we're
2426 * about to do, so if you know the value for your hardware, you
2427 * should put it in the platform data.
2428 */
2429 fifo_size = mci_readl(host, FIFOTH);
8234e869 2430 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
b86d8253
JH
2431 } else {
2432 fifo_size = host->pdata->fifo_depth;
2433 }
2434 host->fifo_depth = fifo_size;
52426899
SJ
2435 host->fifoth_val =
2436 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
e61cf118 2437 mci_writel(host, FIFOTH, host->fifoth_val);
f95f3850
WN
2438
2439 /* disable clock to CIU */
2440 mci_writel(host, CLKENA, 0);
2441 mci_writel(host, CLKSRC, 0);
2442
63008768
JH
2443 /*
2444 * In 2.40a spec, Data offset is changed.
2445 * Need to check the version-id and set data-offset for DATA register.
2446 */
2447 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2448 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2449
2450 if (host->verid < DW_MMC_240A)
2451 host->data_offset = DATA_OFFSET;
2452 else
2453 host->data_offset = DATA_240A_OFFSET;
2454
f95f3850 2455 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
95dcc2cb 2456 host->card_workqueue = alloc_workqueue("dw-mci-card",
1791b13e 2457 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
ef7aef9a
WY
2458 if (!host->card_workqueue) {
2459 ret = -ENOMEM;
1791b13e 2460 goto err_dmaunmap;
ef7aef9a 2461 }
1791b13e 2462 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
780f22af
SJ
2463 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2464 host->irq_flags, "dw-mci", host);
f95f3850 2465 if (ret)
1791b13e 2466 goto err_workqueue;
f95f3850 2467
f95f3850
WN
2468 if (host->pdata->num_slots)
2469 host->num_slots = host->pdata->num_slots;
2470 else
2471 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2472
2da1d7f2
YC
2473 /*
2474 * Enable interrupts for command done, data over, data empty, card det,
2475 * receive ready and error such as transmit, receive timeout, crc error
2476 */
2477 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2478 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2479 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2480 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2481 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2482
2483 dev_info(host->dev, "DW MMC controller at irq %d, "
2484 "%d bit host data width, "
2485 "%u deep fifo\n",
2486 host->irq, width, fifo_size);
2487
f95f3850
WN
2488 /* We need at least one slot to succeed */
2489 for (i = 0; i < host->num_slots; i++) {
2490 ret = dw_mci_init_slot(host, i);
1c2215b7
TA
2491 if (ret)
2492 dev_dbg(host->dev, "slot %d init failed\n", i);
2493 else
2494 init_slots++;
2495 }
2496
2497 if (init_slots) {
2498 dev_info(host->dev, "%d slots initialized\n", init_slots);
2499 } else {
2500 dev_dbg(host->dev, "attempted to initialize %d slots, "
2501 "but failed on all\n", host->num_slots);
780f22af 2502 goto err_workqueue;
f95f3850
WN
2503 }
2504
f95f3850 2505 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4a90920c 2506 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
f95f3850
WN
2507
2508 return 0;
2509
1791b13e 2510err_workqueue:
95dcc2cb 2511 destroy_workqueue(host->card_workqueue);
1791b13e 2512
f95f3850
WN
2513err_dmaunmap:
2514 if (host->use_dma && host->dma_ops->exit)
2515 host->dma_ops->exit(host);
f95f3850 2516
870556a3 2517err_regulator:
780f22af 2518 if (host->vmmc)
c07946a3 2519 regulator_disable(host->vmmc);
f90a0612
TA
2520
2521err_clk_ciu:
780f22af 2522 if (!IS_ERR(host->ciu_clk))
f90a0612 2523 clk_disable_unprepare(host->ciu_clk);
780f22af 2524
f90a0612 2525err_clk_biu:
780f22af 2526 if (!IS_ERR(host->biu_clk))
f90a0612 2527 clk_disable_unprepare(host->biu_clk);
780f22af 2528
f95f3850
WN
2529 return ret;
2530}
62ca8034 2531EXPORT_SYMBOL(dw_mci_probe);
f95f3850 2532
62ca8034 2533void dw_mci_remove(struct dw_mci *host)
f95f3850 2534{
f95f3850
WN
2535 int i;
2536
2537 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2538 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2539
f95f3850 2540 for (i = 0; i < host->num_slots; i++) {
4a90920c 2541 dev_dbg(host->dev, "remove slot %d\n", i);
f95f3850
WN
2542 if (host->slot[i])
2543 dw_mci_cleanup_slot(host->slot[i], i);
2544 }
2545
2546 /* disable clock to CIU */
2547 mci_writel(host, CLKENA, 0);
2548 mci_writel(host, CLKSRC, 0);
2549
95dcc2cb 2550 destroy_workqueue(host->card_workqueue);
f95f3850
WN
2551
2552 if (host->use_dma && host->dma_ops->exit)
2553 host->dma_ops->exit(host);
2554
780f22af 2555 if (host->vmmc)
c07946a3 2556 regulator_disable(host->vmmc);
c07946a3 2557
f90a0612
TA
2558 if (!IS_ERR(host->ciu_clk))
2559 clk_disable_unprepare(host->ciu_clk);
780f22af 2560
f90a0612
TA
2561 if (!IS_ERR(host->biu_clk))
2562 clk_disable_unprepare(host->biu_clk);
f95f3850 2563}
62ca8034
SH
2564EXPORT_SYMBOL(dw_mci_remove);
2565
2566
f95f3850 2567
6fe8890d 2568#ifdef CONFIG_PM_SLEEP
f95f3850
WN
2569/*
2570 * TODO: we should probably disable the clock to the card in the suspend path.
2571 */
62ca8034 2572int dw_mci_suspend(struct dw_mci *host)
f95f3850 2573{
62ca8034 2574 int i, ret = 0;
f95f3850
WN
2575
2576 for (i = 0; i < host->num_slots; i++) {
2577 struct dw_mci_slot *slot = host->slot[i];
2578 if (!slot)
2579 continue;
2580 ret = mmc_suspend_host(slot->mmc);
2581 if (ret < 0) {
2582 while (--i >= 0) {
2583 slot = host->slot[i];
2584 if (slot)
2585 mmc_resume_host(host->slot[i]->mmc);
2586 }
2587 return ret;
2588 }
2589 }
2590
c07946a3
JC
2591 if (host->vmmc)
2592 regulator_disable(host->vmmc);
2593
f95f3850
WN
2594 return 0;
2595}
62ca8034 2596EXPORT_SYMBOL(dw_mci_suspend);
f95f3850 2597
62ca8034 2598int dw_mci_resume(struct dw_mci *host)
f95f3850
WN
2599{
2600 int i, ret;
f95f3850 2601
f2f942ce
SK
2602 if (host->vmmc) {
2603 ret = regulator_enable(host->vmmc);
2604 if (ret) {
2605 dev_err(host->dev,
2606 "failed to enable regulator: %d\n", ret);
2607 return ret;
2608 }
2609 }
1d6c4e0a 2610
4a90920c 2611 if (!mci_wait_reset(host->dev, host)) {
e61cf118
JC
2612 ret = -ENODEV;
2613 return ret;
2614 }
2615
3bfe619d 2616 if (host->use_dma && host->dma_ops->init)
141a712a
SJ
2617 host->dma_ops->init(host);
2618
52426899
SJ
2619 /*
2620 * Restore the initial value at FIFOTH register
2621 * And Invalidate the prev_blksz with zero
2622 */
e61cf118 2623 mci_writel(host, FIFOTH, host->fifoth_val);
52426899 2624 host->prev_blksz = 0;
e61cf118 2625
2eb2944f
DA
2626 /* Put in max timeout */
2627 mci_writel(host, TMOUT, 0xFFFFFFFF);
2628
e61cf118
JC
2629 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2630 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2631 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2632 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2633 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2634
f95f3850
WN
2635 for (i = 0; i < host->num_slots; i++) {
2636 struct dw_mci_slot *slot = host->slot[i];
2637 if (!slot)
2638 continue;
ab269128
AK
2639 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2640 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2641 dw_mci_setup_bus(slot, true);
2642 }
2643
f95f3850
WN
2644 ret = mmc_resume_host(host->slot[i]->mmc);
2645 if (ret < 0)
2646 return ret;
2647 }
f95f3850
WN
2648 return 0;
2649}
62ca8034 2650EXPORT_SYMBOL(dw_mci_resume);
6fe8890d
JC
2651#endif /* CONFIG_PM_SLEEP */
2652
f95f3850
WN
2653static int __init dw_mci_init(void)
2654{
8e1c4e4d 2655 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
62ca8034 2656 return 0;
f95f3850
WN
2657}
2658
2659static void __exit dw_mci_exit(void)
2660{
f95f3850
WN
2661}
2662
2663module_init(dw_mci_init);
2664module_exit(dw_mci_exit);
2665
2666MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2667MODULE_AUTHOR("NXP Semiconductor VietNam");
2668MODULE_AUTHOR("Imagination Technologies Ltd");
2669MODULE_LICENSE("GPL v2");