]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/mmc/host/dw_mmc.c
mmc: dw_mmc: rework the code related to cmd/data completion
[mirror_ubuntu-bionic-kernel.git] / drivers / mmc / host / dw_mmc.c
CommitLineData
f95f3850
WN
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
f95f3850
WN
25#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/mmc.h>
90c2143a 32#include <linux/mmc/sdio.h>
f95f3850
WN
33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
c07946a3 35#include <linux/regulator/consumer.h>
1791b13e 36#include <linux/workqueue.h>
c91eab4b 37#include <linux/of.h>
55a6ceb2 38#include <linux/of_gpio.h>
f95f3850
WN
39
40#include "dw_mmc.h"
41
42/* Common flag combinations */
3f7eec62 43#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
f95f3850
WN
44 SDMMC_INT_HTO | SDMMC_INT_SBE | \
45 SDMMC_INT_EBE)
46#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
47 SDMMC_INT_RESP_ERR)
48#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
49 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
50#define DW_MCI_SEND_STATUS 1
51#define DW_MCI_RECV_STATUS 2
52#define DW_MCI_DMA_THRESHOLD 16
53
1f44a2a5
SJ
54#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
55#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
56
f95f3850 57#ifdef CONFIG_MMC_DW_IDMAC
fc79a4d6
JS
58#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
59 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
60 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
61 SDMMC_IDMAC_INT_TI)
62
f95f3850
WN
/*
 * Descriptor layout for the internal DMA controller (IDMAC), chained
 * (dual-buffer) mode: des3 holds the link to the next descriptor (see
 * dw_mci_idmac_init(), which forward-links the ring).
 */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* suppress completion interrupt for this desc */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of a transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of a transfer */
#define IDMAC_DES0_CH	BIT(4)	/* des3 is a chain pointer, not a 2nd buffer */
#define IDMAC_DES0_ER	BIT(5)	/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* NOTE(review): presumably "card error summary" -- confirm vs databook */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor owned by the IDMAC hardware */

	u32		des1;	/* Buffer sizes */
	/* Buffer-1 size lives in bits [12:0]; bits [25:13] (buffer 2) are preserved */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
81#endif /* CONFIG_MMC_DW_IDMAC */
82
0976f16d
SJ
/*
 * Tuning block pattern used with CMD19/CMD21 on a 4-bit bus (see
 * dw_mci_execute_tuning()).  The byte values are a fixed pattern --
 * presumably the one defined by the SD/eMMC specifications; do not edit.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
f95f3850 93
0976f16d
SJ
/*
 * Tuning block pattern used with CMD21 (HS200) on an 8-bit bus (see
 * dw_mci_execute_tuning()).  Fixed pattern; do not edit.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
112
113#if defined(CONFIG_DEBUG_FS)
114static int dw_mci_req_show(struct seq_file *s, void *v)
115{
116 struct dw_mci_slot *slot = s->private;
117 struct mmc_request *mrq;
118 struct mmc_command *cmd;
119 struct mmc_command *stop;
120 struct mmc_data *data;
121
122 /* Make sure we get a consistent snapshot */
123 spin_lock_bh(&slot->host->lock);
124 mrq = slot->mrq;
125
126 if (mrq) {
127 cmd = mrq->cmd;
128 data = mrq->data;
129 stop = mrq->stop;
130
131 if (cmd)
132 seq_printf(s,
133 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
134 cmd->opcode, cmd->arg, cmd->flags,
135 cmd->resp[0], cmd->resp[1], cmd->resp[2],
136 cmd->resp[2], cmd->error);
137 if (data)
138 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
139 data->bytes_xfered, data->blocks,
140 data->blksz, data->flags, data->error);
141 if (stop)
142 seq_printf(s,
143 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
144 stop->opcode, stop->arg, stop->flags,
145 stop->resp[0], stop->resp[1], stop->resp[2],
146 stop->resp[2], stop->error);
147 }
148
149 spin_unlock_bh(&slot->host->lock);
150
151 return 0;
152}
153
/* open() for the debugfs "req" file; i_private is the dw_mci_slot. */
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}
158
/* file_operations for the debugfs "req" file (single-shot seq_file). */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
166
167static int dw_mci_regs_show(struct seq_file *s, void *v)
168{
169 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
170 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
171 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
172 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
173 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
174 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
175
176 return 0;
177}
178
/* open() for the debugfs "regs" file; i_private is the dw_mci host. */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}
183
/* file_operations for the debugfs "regs" file (single-shot seq_file). */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
191
192static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
193{
194 struct mmc_host *mmc = slot->mmc;
195 struct dw_mci *host = slot->host;
196 struct dentry *root;
197 struct dentry *node;
198
199 root = mmc->debugfs_root;
200 if (!root)
201 return;
202
203 node = debugfs_create_file("regs", S_IRUSR, root, host,
204 &dw_mci_regs_fops);
205 if (!node)
206 goto err;
207
208 node = debugfs_create_file("req", S_IRUSR, root, slot,
209 &dw_mci_req_fops);
210 if (!node)
211 goto err;
212
213 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
214 if (!node)
215 goto err;
216
217 node = debugfs_create_x32("pending_events", S_IRUSR, root,
218 (u32 *)&host->pending_events);
219 if (!node)
220 goto err;
221
222 node = debugfs_create_x32("completed_events", S_IRUSR, root,
223 (u32 *)&host->completed_events);
224 if (!node)
225 goto err;
226
227 return;
228
229err:
230 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
231}
232#endif /* defined(CONFIG_DEBUG_FS) */
233
/* Program the data/response timeout register to its maximum value. */
static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}
239
/*
 * Translate an mmc_command into the SDMMC CMD register value: opcode in
 * the low bits plus stop/response/data flag bits.  Gives the platform
 * driver (drv_data->prepare_command) a final chance to tweak the word.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	/*
	 * Abort-class commands (CMD12, CMD0, CMD15, and the SDIO CCCR
	 * abort write) must carry the STOP bit; everything else with data
	 * waits for the previous data transfer to finish first.
	 */
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else
		if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
			cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	/* Data phase: direction and (stream vs block) mode bits. */
	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* Platform-specific fixups, if any. */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
284
90c2143a
SJ
285static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
286{
287 struct mmc_command *stop;
288 u32 cmdr;
289
290 if (!cmd->data)
291 return 0;
292
293 stop = &host->stop_abort;
294 cmdr = cmd->opcode;
295 memset(stop, 0, sizeof(struct mmc_command));
296
297 if (cmdr == MMC_READ_SINGLE_BLOCK ||
298 cmdr == MMC_READ_MULTIPLE_BLOCK ||
299 cmdr == MMC_WRITE_BLOCK ||
300 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
301 stop->opcode = MMC_STOP_TRANSMISSION;
302 stop->arg = 0;
303 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
304 } else if (cmdr == SD_IO_RW_EXTENDED) {
305 stop->opcode = SD_IO_RW_DIRECT;
306 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
307 ((cmd->arg >> 28) & 0x7);
308 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
309 } else {
310 return 0;
311 }
312
313 cmdr = stop->opcode | SDMMC_CMD_STOP |
314 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
315
316 return cmdr;
317}
318
f95f3850
WN
/*
 * Issue a command to the controller: latch it as host->cmd, write the
 * argument, then write CMD with the START bit set.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	/* Make sure the argument lands before the START-bit write below. */
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
332
90c2143a 333static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
f95f3850 334{
90c2143a
SJ
335 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
336 dw_mci_start_command(host, stop, host->stop_cmdr);
f95f3850
WN
337}
338
339/* DMA interface functions */
/*
 * Abort the current data transfer.  In DMA mode, stop and clean up the
 * DMA engine; in PIO mode, the transfer is already stopped by the
 * interrupt handler, so just mark it complete.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	} else {
		/* Data transfer was stopped by the interrupt handler */
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
	}
}
350
9aa51408
SJ
351static int dw_mci_get_dma_dir(struct mmc_data *data)
352{
353 if (data->flags & MMC_DATA_WRITE)
354 return DMA_TO_DEVICE;
355 else
356 return DMA_FROM_DEVICE;
357}
358
9beee912 359#ifdef CONFIG_MMC_DW_IDMAC
f95f3850
WN
360static void dw_mci_dma_cleanup(struct dw_mci *host)
361{
362 struct mmc_data *data = host->data;
363
364 if (data)
9aa51408 365 if (!data->host_cookie)
4a90920c 366 dma_unmap_sg(host->dev,
9aa51408
SJ
367 data->sg,
368 data->sg_len,
369 dw_mci_get_dma_dir(data));
f95f3850
WN
370}
371
/* Take the internal DMA controller out of the data path and halt it. */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	mci_writel(host, BMOD, temp);
}
387
/*
 * IDMAC transfer-complete handler: clean up the mapping and, if the
 * request is still alive, flag XFER_COMPLETE and kick the state-machine
 * tasklet.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
405
/*
 * Fill the IDMAC descriptor ring (host->sg_cpu) from the mapped
 * scatterlist: one descriptor per sg entry, then mark the first/last
 * descriptors and enable the completion interrupt only on the last.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/*
	 * Set last descriptor.  NOTE(review): the explicit sizeof scaling
	 * implies host->sg_cpu is byte-addressed (void *) -- confirm the
	 * field type in dw_mmc.h.
	 */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	/* Descriptors must be visible to the IDMAC before it is started. */
	wmb();
}
437
/*
 * Program the descriptor ring for the current transfer, route the data
 * path through the IDMAC, enable it and poke the poll-demand register
 * to start fetching descriptors.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* Order the CTRL update before enabling the IDMAC below. */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
459
/*
 * One-time IDMAC setup: build the circular descriptor ring inside the
 * page at host->sg_cpu/host->sg_dma, reset the IDMAC, unmask only the
 * Tx/Rx completion interrupts and point the hardware at the ring.
 * Always returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
487
8e2b36ea 488static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
885c3e80
SJ
489 .init = dw_mci_idmac_init,
490 .start = dw_mci_idmac_start_dma,
491 .stop = dw_mci_idmac_stop_dma,
492 .complete = dw_mci_idmac_complete_dma,
493 .cleanup = dw_mci_dma_cleanup,
494};
495#endif /* CONFIG_MMC_DW_IDMAC */
496
9aa51408
SJ
/*
 * Validate and DMA-map a data transfer's scatterlist.
 *
 * @next: true when called from dw_mci_pre_req() (asynchronous
 *        pre-mapping); the mapped length is then cached in
 *        data->host_cookie for the later submit path.
 *
 * Returns the number of mapped sg entries, or -EINVAL when the transfer
 * is unsuitable for DMA (too short, unaligned, or mapping failed).
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	/* Already mapped by a previous pre_req call -- reuse it. */
	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
535
9aa51408
SJ
/*
 * mmc_host_ops.pre_req: pre-map the next request's data for DMA so the
 * mapping cost overlaps with the current transfer.  On any failure the
 * cookie is cleared and the submit path maps (or falls back to PIO)
 * itself.
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* A stale cookie would be misread as "already mapped" -- reset it. */
	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}
554
/*
 * mmc_host_ops.post_req: undo the pre_req mapping (if one was made) and
 * clear the cookie unconditionally.
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}
572
52426899
SJ
/*
 * Pick DMA burst size (MSIZE) and RX/TX FIFO watermarks for the given
 * block size and program FIFOTH.  Scans the candidate burst sizes from
 * largest to smallest for one that divides both the block depth and the
 * free TX space; falls back to MSIZE=1/rx_wmark=1 otherwise.
 * Only meaningful with the IDMAC backend.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
613
f1d2736c
SJ
/*
 * Configure the card read threshold for a read transfer: enabled only
 * in HS200/SDR104 timing and only when a whole block fits in the FIFO;
 * disabled otherwise.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
644
9aa51408
SJ
/*
 * Try to submit a data transfer via DMA.  Returns 0 on success (DMA
 * started, host->using_dma set) or a negative error when DMA is
 * unavailable/unsuitable, in which case the caller falls back to PIO.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
691
/*
 * Set up the data phase of a request: record direction, program the
 * read threshold for reads, and start the transfer via DMA -- or, when
 * DMA is refused, fall back to PIO (sg_miter + RX/TX FIFO interrupts).
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback path. */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		/* Clear stale FIFO events, then enable RX/TX interrupts. */
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
746
/*
 * Issue a "register-only" command to the CIU (e.g. clock update) and
 * busy-wait up to 500ms for the controller to clear the START bit.
 * Logs an error on timeout; callers do not get a return value.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	/* Order the argument write before setting the START bit. */
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
766
ab269128 767static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
f95f3850
WN
768{
769 struct dw_mci *host = slot->host;
fdf492a1 770 unsigned int clock = slot->clock;
f95f3850 771 u32 div;
9623b5b9 772 u32 clk_en_a;
f95f3850 773
fdf492a1
DA
774 if (!clock) {
775 mci_writel(host, CLKENA, 0);
776 mci_send_cmd(slot,
777 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
778 } else if (clock != host->current_speed || force_clkinit) {
779 div = host->bus_hz / clock;
780 if (host->bus_hz % clock && host->bus_hz > clock)
f95f3850
WN
781 /*
782 * move the + 1 after the divide to prevent
783 * over-clocking the card.
784 */
e419990b
SJ
785 div += 1;
786
fdf492a1 787 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
f95f3850 788
fdf492a1
DA
789 if ((clock << div) != slot->__clk_old || force_clkinit)
790 dev_info(&slot->mmc->class_dev,
791 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
792 slot->id, host->bus_hz, clock,
793 div ? ((host->bus_hz / div) >> 1) :
794 host->bus_hz, div);
f95f3850
WN
795
796 /* disable clock */
797 mci_writel(host, CLKENA, 0);
798 mci_writel(host, CLKSRC, 0);
799
800 /* inform CIU */
801 mci_send_cmd(slot,
802 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
803
804 /* set clock to desired speed */
805 mci_writel(host, CLKDIV, div);
806
807 /* inform CIU */
808 mci_send_cmd(slot,
809 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
810
9623b5b9
DA
811 /* enable clock; only low power if no SDIO */
812 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
813 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
814 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
815 mci_writel(host, CLKENA, clk_en_a);
f95f3850
WN
816
817 /* inform CIU */
818 mci_send_cmd(slot,
819 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
820
fdf492a1
DA
821 /* keep the clock with reflecting clock dividor */
822 slot->__clk_old = clock << div;
f95f3850
WN
823 }
824
fdf492a1
DA
825 host->current_speed = clock;
826
f95f3850 827 /* Set the current slot bus width */
1d56c453 828 mci_writel(host, CTYPE, (slot->ctype << slot->id));
f95f3850
WN
829}
830
053b3ce6
SJ
/*
 * Start @cmd of @slot's current request on the controller: reset the
 * per-request event/status state, program the data phase (if any),
 * issue the command and pre-compute the stop/abort command word.
 * Caller holds host->lock (via dw_mci_queue_request()).
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		/* Data setup must be visible before the command is started. */
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	/* Pre-build the stop word so an abort can be issued immediately. */
	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
877
053b3ce6
SJ
878static void dw_mci_start_request(struct dw_mci *host,
879 struct dw_mci_slot *slot)
880{
881 struct mmc_request *mrq = slot->mrq;
882 struct mmc_command *cmd;
883
884 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
885 __dw_mci_start_request(host, slot, cmd);
886}
887
7456caae 888/* must be called with host->lock held */
f95f3850
WN
889static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
890 struct mmc_request *mrq)
891{
892 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
893 host->state);
894
f95f3850
WN
895 slot->mrq = mrq;
896
897 if (host->state == STATE_IDLE) {
898 host->state = STATE_SENDING_CMD;
899 dw_mci_start_request(host, slot);
900 } else {
901 list_add_tail(&slot->queue_node, &host->queue);
902 }
f95f3850
WN
903}
904
/*
 * mmc_host_ops.request: fail fast with -ENOMEDIUM when no card is
 * present, otherwise queue the request under host->lock.
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}
930
/*
 * mmc_host_ops.set_ios: apply bus width, DDR timing, clock and power
 * settings requested by the mmc core, with a platform hook
 * (drv_data->set_ios) before the bus is reprogrammed.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set: per-slot DDR bit lives in the upper half of UHS_REG */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		/* First command after power-up must carry SDMMC_CMD_INIT. */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		/* Power up slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}
994
/*
 * mmc_host_ops.get_ro: report write-protect state (1 = read-only).
 * Precedence: slot quirk (never WP) > platform callback > WP GPIO >
 * controller WRTPRT register.
 */
static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
		read_only = 0;
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}
1017
/*
 * mmc_host_ops.get_cd: report card presence (1 = present).
 * Precedence: broken-CD quirk (always present) > platform callback >
 * controller CDETECT register (active low).
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	if (present)
		dev_dbg(&mmc->class_dev, "card is present\n");
	else
		dev_dbg(&mmc->class_dev, "card is not present\n");

	return present;
}
1040
9623b5b9
DA
/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		/* CLKENA changes only take effect after a CIU clock update. */
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}
1064
1a5c8e1f
SH
/*
 * mmc_host_ops.enable_sdio_irq: (un)mask the per-slot SDIO interrupt.
 * Enabling also turns off clock low-power mode, since a stopped card
 * clock would prevent SDIO interrupt delivery.
 */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled. This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}
1089
0976f16d
SJ
1090static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1091{
1092 struct dw_mci_slot *slot = mmc_priv(mmc);
1093 struct dw_mci *host = slot->host;
1094 const struct dw_mci_drv_data *drv_data = host->drv_data;
1095 struct dw_mci_tuning_data tuning_data;
1096 int err = -ENOSYS;
1097
1098 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1099 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1100 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1101 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1102 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1103 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1104 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1105 } else {
1106 return -EINVAL;
1107 }
1108 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1109 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1110 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1111 } else {
1112 dev_err(host->dev,
1113 "Undefined command(%d) for tuning\n", opcode);
1114 return -EINVAL;
1115 }
1116
1117 if (drv_data && drv_data->execute_tuning)
1118 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1119 return err;
1120}
1121
/* mmc_host_ops callbacks exported to the MMC core for each slot */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};
1132
/*
 * Complete the current request and, if other slots have requests
 * queued, start the next one.
 *
 * Must be called with host->lock held.  The lock is dropped around
 * mmc_request_done() so the MMC core may submit a new request from the
 * completion path without deadlocking, then re-acquired before return
 * (as annotated by __releases/__acquires).
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	/* By now both command and data phases must be finished */
	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot is waiting: dequeue and start it now */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1161
/*
 * Finish a command: read back the card response and translate the
 * latched controller status bits into cmd->error.
 *
 * Returns cmd->error (0 on success, negative errno on failure).
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit response: RESP0 holds the lowest word */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Map status bits to errno; response timeout takes precedence */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}
1200
/*
 * Finish a data transfer: set data->error and data->bytes_xfered from
 * the latched data status.  On error the FIFO is reset so stale data
 * cannot leak into the next transfer.
 *
 * Returns data->error (0 on success).
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status, ctrl;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			/* End-bit error: meaning depends on direction */
			if (host->dir_status ==
			    DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
				   DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_err(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO, so reset it - doing so
		 * generates a block interrupt, hence setting
		 * the scatter-gather pointer to NULL.
		 */
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
		ctrl = mci_readl(host, CTRL);
		ctrl |= SDMMC_CTRL_FIFO_RESET;
		mci_writel(host, CTRL, ctrl);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
1249
/*
 * Request state machine, run in tasklet (softirq) context under
 * host->lock.  Each pass consumes bits from host->pending_events and
 * advances host->state (SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP, with DATA_ERROR as the error detour).  The outer loop
 * repeats until the state stops changing, i.e. no further event is
 * ready; the switch cases fall through deliberately so one invocation
 * can advance several phases.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	u32 ctrl;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now issue the actual data command */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			/* Data command failed: abort the transfer */
			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				/* Pre-defined transfers (CMD23) need no stop */
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data) {
				/* Flush any data left over from the failed transfer */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;
				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);
			}

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the transfer to drain before finishing up */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1392
34b664a2
JH
1393/* push final bytes to part_buf, only use during push */
1394static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 1395{
34b664a2
JH
1396 memcpy((void *)&host->part_buf, buf, cnt);
1397 host->part_buf_count = cnt;
1398}
f95f3850 1399
34b664a2
JH
1400/* append bytes to part_buf, only use during push */
1401static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1402{
1403 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1404 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1405 host->part_buf_count += cnt;
1406 return cnt;
1407}
f95f3850 1408
34b664a2
JH
1409/* pull first bytes from part_buf, only use during pull */
1410static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1411{
1412 cnt = min(cnt, (int)host->part_buf_count);
1413 if (cnt) {
1414 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1415 cnt);
1416 host->part_buf_count -= cnt;
1417 host->part_buf_start += cnt;
f95f3850 1418 }
34b664a2 1419 return cnt;
f95f3850
WN
1420}
1421
34b664a2
JH
1422/* pull final bytes from the part_buf, assuming it's just been filled */
1423static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 1424{
34b664a2
JH
1425 memcpy(buf, &host->part_buf, cnt);
1426 host->part_buf_start = cnt;
1427 host->part_buf_count = (1 << host->data_shift) - cnt;
1428}
f95f3850 1429
34b664a2
JH
/*
 * PIO push for a 16-bit wide FIFO: write cnt bytes from buf, buffering
 * any odd trailing byte in part_buf (flushed once the expected transfer
 * length has been reached).
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			/* part_buf now holds a full 16-bit word: flush it */
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		/* Unaligned source: bounce through a stack buffer */
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
f95f3850 1480
34b664a2
JH
/*
 * PIO pull for a 16-bit wide FIFO: read cnt bytes into buf; if cnt is
 * odd, one extra FIFO word is read and the leftover byte is kept in
 * part_buf for the next pull.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		/* Unaligned destination: bounce through a stack buffer */
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Odd trailing byte: read one more word, buffer the rest */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1512
/*
 * PIO push for a 32-bit wide FIFO: write cnt bytes from buf, buffering
 * up to 3 trailing bytes in part_buf (flushed once the expected
 * transfer length has been reached).
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			/* part_buf now holds a full 32-bit word: flush it */
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		/* Unaligned source: bounce through a stack buffer */
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
1563
/*
 * PIO pull for a 32-bit wide FIFO: read cnt bytes into buf; a partial
 * trailing word is read in full and the leftover bytes are kept in
 * part_buf for the next pull.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		/* Unaligned destination: bounce through a stack buffer */
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Partial trailing word: read it whole, buffer the rest */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1595
/*
 * PIO push for a 64-bit wide FIFO: write cnt bytes from buf, buffering
 * up to 7 trailing bytes in part_buf (flushed once the expected
 * transfer length has been reached).
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			/* part_buf now holds a full 64-bit word: flush it */
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		/* Unaligned source: bounce through a stack buffer */
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
1647
/*
 * PIO pull for a 64-bit wide FIFO: read cnt bytes into buf; a partial
 * trailing word is read in full and the leftover bytes are kept in
 * part_buf for the next pull.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		/* Unaligned destination: bounce through a stack buffer */
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Partial trailing word: read it whole, buffer the rest */
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
f95f3850 1679
34b664a2
JH
1680static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1681{
1682 int len;
f95f3850 1683
34b664a2
JH
1684 /* get remaining partial bytes */
1685 len = dw_mci_pull_part_bytes(host, buf, cnt);
1686 if (unlikely(len == cnt))
1687 return;
1688 buf += len;
1689 cnt -= len;
1690
1691 /* get the rest of the data */
1692 host->pull_data(host, buf, cnt);
f95f3850
WN
1693}
1694
/*
 * PIO read path: drain the receive FIFO into the request's scatterlist
 * using the sg mapping iterator.
 *
 * @dto: true when called from the data-over interrupt; in that case
 *       keep looping while the FIFO counter still reports bytes.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* available = FIFO words (in bytes) + buffered partial bytes */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* Current sg entry is fully consumed; advance for next call */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();	/* publish the stores above before signalling completion */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1748
/*
 * PIO write path: feed the transmit FIFO from the request's scatterlist
 * using the sg mapping iterator, writing as much as the FIFO currently
 * has room for.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free space = empty FIFO words (in bytes) minus buffered bytes */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* Current sg entry is fully consumed; advance for next call */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();	/* publish the stores above before signalling completion */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1802
/*
 * Record the command-done status (unless an error status was already
 * latched) and hand completion processing to the tasklet.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();	/* make cmd_status visible before the event bit */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1813
/*
 * Top-level interrupt handler.  Reads the masked status once, then
 * acknowledges each asserted source in RINTSTS, latches status words
 * for the tasklet and defers heavy processing (state machine, card
 * detect work) out of hard-irq context.  PIO data is moved here
 * directly on RXDR/TXDR/data-over.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();	/* status store before event bit */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();	/* status store before event bit */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* Drain any bytes remaining in the RX FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			/* Card insert/remove: processed in workqueue context */
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
1907
/*
 * Card-detect workqueue handler.  For every slot whose presence
 * changed, abort any in-flight or queued request with -ENOMEDIUM,
 * reset the FIFO (and internal DMA) on removal, and finally notify the
 * MMC core via mmc_detect_change().  The while loop re-reads presence
 * so a bounce during processing is handled before exiting.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;
		u32 ctrl;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Mark card as present if applicable */
			if (present != 0)
				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* Request is active on the controller:
					 * fail it according to current state */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* Request was still queued: fail it
					 * without touching the controller */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

				/*
				 * Clear down the FIFO - doing so generates a
				 * block interrupt, hence setting the
				 * scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;

				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);

#ifdef CONFIG_MMC_DW_IDMAC
				ctrl = mci_readl(host, BMOD);
				/* Software reset of DMA */
				ctrl |= SDMMC_IDMAC_SWRESET;
				mci_writel(host, BMOD, ctrl);
#endif

			}

			spin_unlock_bh(&host->lock);

			/* Re-check in case the card bounced meanwhile */
			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
2013
c91eab4b
TA
2014#ifdef CONFIG_OF
2015/* given a slot id, find out the device node representing that slot */
2016static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2017{
2018 struct device_node *np;
2019 const __be32 *addr;
2020 int len;
2021
2022 if (!dev || !dev->of_node)
2023 return NULL;
2024
2025 for_each_child_of_node(dev->of_node, np) {
2026 addr = of_get_property(np, "reg", &len);
2027 if (!addr || (len < sizeof(int)))
2028 continue;
2029 if (be32_to_cpup(addr) == slot)
2030 return np;
2031 }
2032 return NULL;
2033}
2034
a70aaa64
DA
/* Translation table: device-tree property name -> slot quirk flag */
static struct dw_mci_of_slot_quirks {
	char *quirk;	/* device-tree property to look for */
	int id;		/* DW_MCI_SLOT_QUIRK_* flag it enables */
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
2044
2045static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2046{
2047 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2048 int quirks = 0;
2049 int idx;
2050
2051 /* get quirks */
2052 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2053 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2054 quirks |= of_slot_quirks[idx].id;
2055
2056 return quirks;
2057}
2058
c91eab4b
TA
2059/* find out bus-width for a given slot */
2060static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2061{
2062 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2063 u32 bus_wd = 1;
2064
2065 if (!np)
2066 return 1;
2067
2068 if (of_property_read_u32(np, "bus-width", &bus_wd))
2069 dev_err(dev, "bus-width property not found, assuming width"
2070 " as 1\n");
2071 return bus_wd;
2072}
55a6ceb2
DA
2073
2074/* find the write protect gpio for a given slot; or -1 if none specified */
2075static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2076{
2077 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2078 int gpio;
2079
2080 if (!np)
2081 return -EINVAL;
2082
2083 gpio = of_get_named_gpio(np, "wp-gpios", 0);
2084
2085 /* Having a missing entry is valid; return silently */
2086 if (!gpio_is_valid(gpio))
2087 return -EINVAL;
2088
2089 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2090 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2091 return -EINVAL;
2092 }
2093
2094 return gpio;
2095}
#else /* CONFIG_OF */
/* Stubs used when the kernel is built without device-tree support */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	return 1;	/* assume a 1-bit bus */
}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	return -EINVAL;	/* no write-protect gpio available */
}
#endif /* CONFIG_OF */
2114
36c179a9 2115static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
f95f3850
WN
2116{
2117 struct mmc_host *mmc;
2118 struct dw_mci_slot *slot;
e95baf13 2119 const struct dw_mci_drv_data *drv_data = host->drv_data;
800d78bf 2120 int ctrl_id, ret;
1f44a2a5 2121 u32 freq[2];
c91eab4b 2122 u8 bus_width;
f95f3850 2123
4a90920c 2124 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
f95f3850
WN
2125 if (!mmc)
2126 return -ENOMEM;
2127
2128 slot = mmc_priv(mmc);
2129 slot->id = id;
2130 slot->mmc = mmc;
2131 slot->host = host;
c91eab4b 2132 host->slot[id] = slot;
f95f3850 2133
a70aaa64
DA
2134 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2135
f95f3850 2136 mmc->ops = &dw_mci_ops;
1f44a2a5
SJ
2137 if (of_property_read_u32_array(host->dev->of_node,
2138 "clock-freq-min-max", freq, 2)) {
2139 mmc->f_min = DW_MCI_FREQ_MIN;
2140 mmc->f_max = DW_MCI_FREQ_MAX;
2141 } else {
2142 mmc->f_min = freq[0];
2143 mmc->f_max = freq[1];
2144 }
f95f3850
WN
2145
2146 if (host->pdata->get_ocr)
2147 mmc->ocr_avail = host->pdata->get_ocr(id);
2148 else
2149 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2150
2151 /*
2152 * Start with slot power disabled, it will be enabled when a card
2153 * is detected.
2154 */
2155 if (host->pdata->setpower)
2156 host->pdata->setpower(id, 0);
2157
fc3d7720
JC
2158 if (host->pdata->caps)
2159 mmc->caps = host->pdata->caps;
fc3d7720 2160
ab269128
AK
2161 if (host->pdata->pm_caps)
2162 mmc->pm_caps = host->pdata->pm_caps;
2163
800d78bf
TA
2164 if (host->dev->of_node) {
2165 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2166 if (ctrl_id < 0)
2167 ctrl_id = 0;
2168 } else {
2169 ctrl_id = to_platform_device(host->dev)->id;
2170 }
cb27a843
JH
2171 if (drv_data && drv_data->caps)
2172 mmc->caps |= drv_data->caps[ctrl_id];
800d78bf 2173
4f408cc6
SJ
2174 if (host->pdata->caps2)
2175 mmc->caps2 = host->pdata->caps2;
4f408cc6 2176
f95f3850 2177 if (host->pdata->get_bus_wd)
c91eab4b
TA
2178 bus_width = host->pdata->get_bus_wd(slot->id);
2179 else if (host->dev->of_node)
2180 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2181 else
2182 bus_width = 1;
2183
2184 switch (bus_width) {
2185 case 8:
2186 mmc->caps |= MMC_CAP_8_BIT_DATA;
2187 case 4:
2188 mmc->caps |= MMC_CAP_4_BIT_DATA;
2189 }
f95f3850 2190
f95f3850
WN
2191 if (host->pdata->blk_settings) {
2192 mmc->max_segs = host->pdata->blk_settings->max_segs;
2193 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2194 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2195 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2196 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2197 } else {
2198 /* Useful defaults if platform data is unset. */
a39e5746
JC
2199#ifdef CONFIG_MMC_DW_IDMAC
2200 mmc->max_segs = host->ring_size;
2201 mmc->max_blk_size = 65536;
2202 mmc->max_blk_count = host->ring_size;
2203 mmc->max_seg_size = 0x1000;
2204 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2205#else
f95f3850
WN
2206 mmc->max_segs = 64;
2207 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2208 mmc->max_blk_count = 512;
2209 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2210 mmc->max_seg_size = mmc->max_req_size;
f95f3850 2211#endif /* CONFIG_MMC_DW_IDMAC */
a39e5746 2212 }
f95f3850
WN
2213
2214 if (dw_mci_get_cd(mmc))
2215 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2216 else
2217 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2218
55a6ceb2
DA
2219 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2220
0cea529d
JC
2221 ret = mmc_add_host(mmc);
2222 if (ret)
2223 goto err_setup_bus;
f95f3850
WN
2224
2225#if defined(CONFIG_DEBUG_FS)
2226 dw_mci_init_debugfs(slot);
2227#endif
2228
2229 /* Card initially undetected */
2230 slot->last_detect_state = 0;
2231
2232 return 0;
800d78bf
TA
2233
2234err_setup_bus:
2235 mmc_free_host(mmc);
2236 return -EINVAL;
f95f3850
WN
2237}
2238
/*
 * Tear down one slot created by dw_mci_init_slot(): quiesce the
 * platform detect IRQ, unregister the mmc host, drop the host->slot[]
 * reference and free the mmc_host allocation.
 */
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Shutdown detect IRQ */
	if (slot->host->pdata->exit)
		slot->host->pdata->exit(id);

	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
2250
/*
 * Probe-time DMA setup. Allocates the coherent buffer used for
 * scatter-gather descriptor translation and initializes the selected
 * dma_ops. Any failure is non-fatal: the host simply falls back to
 * PIO mode (host->use_dma = 0).
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	/* Without IDMAC, pdata may still have supplied dma_ops earlier. */
	if (!host->dma_ops)
		goto no_dma;

	/* All four callbacks are mandatory for a usable DMA backend. */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
2291
2292static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2293{
2294 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2295 unsigned int ctrl;
2296
2297 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2298 SDMMC_CTRL_DMA_RESET));
2299
2300 /* wait till resets clear */
2301 do {
2302 ctrl = mci_readl(host, CTRL);
2303 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2304 SDMMC_CTRL_DMA_RESET)))
2305 return true;
2306 } while (time_before(jiffies, timeout));
2307
2308 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
2309
2310 return false;
2311}
2312
#ifdef CONFIG_OF
/* Mapping of DT boolean properties to DW_MCI_QUIRK_* flags. */
static struct dw_mci_of_quirks {
	char *quirk;	/* device-tree property name */
	int id;		/* corresponding quirk flag */
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};

/*
 * Build a dw_mci_board from the controller's device-tree node.
 * Most properties are optional and fall back to sane defaults; only
 * allocation failure or a variant parse_dt() error aborts the probe.
 * Returns the allocated pdata or an ERR_PTR on failure.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				&pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
				"assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	/* fifo-depth 0 means "trust the FIFOTH power-on value" in probe */
	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
				"value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* Let the controller variant parse its own extra properties. */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "keep-power-in-suspend", NULL))
		pdata->pm_caps |= MMC_PM_KEEP_POWER;

	if (of_find_property(np, "enable-sdio-wakeup", NULL))
		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;

	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;

	return pdata;
}

#else /* CONFIG_OF */
/* Non-DT build: platform data must come from the board file. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
2391
62ca8034 2392int dw_mci_probe(struct dw_mci *host)
f95f3850 2393{
e95baf13 2394 const struct dw_mci_drv_data *drv_data = host->drv_data;
62ca8034 2395 int width, i, ret = 0;
f95f3850 2396 u32 fifo_size;
1c2215b7 2397 int init_slots = 0;
f95f3850 2398
c91eab4b
TA
2399 if (!host->pdata) {
2400 host->pdata = dw_mci_parse_dt(host);
2401 if (IS_ERR(host->pdata)) {
2402 dev_err(host->dev, "platform data not available\n");
2403 return -EINVAL;
2404 }
f95f3850
WN
2405 }
2406
62ca8034 2407 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
4a90920c 2408 dev_err(host->dev,
f95f3850 2409 "Platform data must supply select_slot function\n");
62ca8034 2410 return -ENODEV;
f95f3850
WN
2411 }
2412
780f22af 2413 host->biu_clk = devm_clk_get(host->dev, "biu");
f90a0612
TA
2414 if (IS_ERR(host->biu_clk)) {
2415 dev_dbg(host->dev, "biu clock not available\n");
2416 } else {
2417 ret = clk_prepare_enable(host->biu_clk);
2418 if (ret) {
2419 dev_err(host->dev, "failed to enable biu clock\n");
f90a0612
TA
2420 return ret;
2421 }
2422 }
2423
780f22af 2424 host->ciu_clk = devm_clk_get(host->dev, "ciu");
f90a0612
TA
2425 if (IS_ERR(host->ciu_clk)) {
2426 dev_dbg(host->dev, "ciu clock not available\n");
3c6d89ea 2427 host->bus_hz = host->pdata->bus_hz;
f90a0612
TA
2428 } else {
2429 ret = clk_prepare_enable(host->ciu_clk);
2430 if (ret) {
2431 dev_err(host->dev, "failed to enable ciu clock\n");
f90a0612
TA
2432 goto err_clk_biu;
2433 }
f90a0612 2434
3c6d89ea
DA
2435 if (host->pdata->bus_hz) {
2436 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2437 if (ret)
2438 dev_warn(host->dev,
2439 "Unable to set bus rate to %ul\n",
2440 host->pdata->bus_hz);
2441 }
f90a0612 2442 host->bus_hz = clk_get_rate(host->ciu_clk);
3c6d89ea 2443 }
f90a0612 2444
002f0d5c
YK
2445 if (drv_data && drv_data->init) {
2446 ret = drv_data->init(host);
2447 if (ret) {
2448 dev_err(host->dev,
2449 "implementation specific init failed\n");
2450 goto err_clk_ciu;
2451 }
2452 }
2453
cb27a843
JH
2454 if (drv_data && drv_data->setup_clock) {
2455 ret = drv_data->setup_clock(host);
800d78bf
TA
2456 if (ret) {
2457 dev_err(host->dev,
2458 "implementation specific clock setup failed\n");
2459 goto err_clk_ciu;
2460 }
2461 }
2462
a55d6ff0 2463 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
870556a3
DA
2464 if (IS_ERR(host->vmmc)) {
2465 ret = PTR_ERR(host->vmmc);
2466 if (ret == -EPROBE_DEFER)
2467 goto err_clk_ciu;
2468
2469 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2470 host->vmmc = NULL;
2471 } else {
2472 ret = regulator_enable(host->vmmc);
2473 if (ret) {
2474 if (ret != -EPROBE_DEFER)
2475 dev_err(host->dev,
2476 "regulator_enable fail: %d\n", ret);
2477 goto err_clk_ciu;
2478 }
2479 }
2480
f90a0612 2481 if (!host->bus_hz) {
4a90920c 2482 dev_err(host->dev,
f95f3850 2483 "Platform data must supply bus speed\n");
f90a0612 2484 ret = -ENODEV;
870556a3 2485 goto err_regulator;
f95f3850
WN
2486 }
2487
62ca8034 2488 host->quirks = host->pdata->quirks;
f95f3850
WN
2489
2490 spin_lock_init(&host->lock);
2491 INIT_LIST_HEAD(&host->queue);
2492
f95f3850
WN
2493 /*
2494 * Get the host data width - this assumes that HCON has been set with
2495 * the correct values.
2496 */
2497 i = (mci_readl(host, HCON) >> 7) & 0x7;
2498 if (!i) {
2499 host->push_data = dw_mci_push_data16;
2500 host->pull_data = dw_mci_pull_data16;
2501 width = 16;
2502 host->data_shift = 1;
2503 } else if (i == 2) {
2504 host->push_data = dw_mci_push_data64;
2505 host->pull_data = dw_mci_pull_data64;
2506 width = 64;
2507 host->data_shift = 3;
2508 } else {
2509 /* Check for a reserved value, and warn if it is */
2510 WARN((i != 1),
2511 "HCON reports a reserved host data width!\n"
2512 "Defaulting to 32-bit access.\n");
2513 host->push_data = dw_mci_push_data32;
2514 host->pull_data = dw_mci_pull_data32;
2515 width = 32;
2516 host->data_shift = 2;
2517 }
2518
2519 /* Reset all blocks */
4a90920c 2520 if (!mci_wait_reset(host->dev, host))
141a712a
SJ
2521 return -ENODEV;
2522
2523 host->dma_ops = host->pdata->dma_ops;
2524 dw_mci_init_dma(host);
f95f3850
WN
2525
2526 /* Clear the interrupts for the host controller */
2527 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2528 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2529
2530 /* Put in max timeout */
2531 mci_writel(host, TMOUT, 0xFFFFFFFF);
2532
2533 /*
2534 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2535 * Tx Mark = fifo_size / 2 DMA Size = 8
2536 */
b86d8253
JH
2537 if (!host->pdata->fifo_depth) {
2538 /*
2539 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2540 * have been overwritten by the bootloader, just like we're
2541 * about to do, so if you know the value for your hardware, you
2542 * should put it in the platform data.
2543 */
2544 fifo_size = mci_readl(host, FIFOTH);
8234e869 2545 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
b86d8253
JH
2546 } else {
2547 fifo_size = host->pdata->fifo_depth;
2548 }
2549 host->fifo_depth = fifo_size;
52426899
SJ
2550 host->fifoth_val =
2551 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
e61cf118 2552 mci_writel(host, FIFOTH, host->fifoth_val);
f95f3850
WN
2553
2554 /* disable clock to CIU */
2555 mci_writel(host, CLKENA, 0);
2556 mci_writel(host, CLKSRC, 0);
2557
63008768
JH
2558 /*
2559 * In 2.40a spec, Data offset is changed.
2560 * Need to check the version-id and set data-offset for DATA register.
2561 */
2562 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2563 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2564
2565 if (host->verid < DW_MMC_240A)
2566 host->data_offset = DATA_OFFSET;
2567 else
2568 host->data_offset = DATA_240A_OFFSET;
2569
f95f3850 2570 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
95dcc2cb 2571 host->card_workqueue = alloc_workqueue("dw-mci-card",
1791b13e 2572 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
ef7aef9a
WY
2573 if (!host->card_workqueue) {
2574 ret = -ENOMEM;
1791b13e 2575 goto err_dmaunmap;
ef7aef9a 2576 }
1791b13e 2577 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
780f22af
SJ
2578 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2579 host->irq_flags, "dw-mci", host);
f95f3850 2580 if (ret)
1791b13e 2581 goto err_workqueue;
f95f3850 2582
f95f3850
WN
2583 if (host->pdata->num_slots)
2584 host->num_slots = host->pdata->num_slots;
2585 else
2586 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2587
2da1d7f2
YC
2588 /*
2589 * Enable interrupts for command done, data over, data empty, card det,
2590 * receive ready and error such as transmit, receive timeout, crc error
2591 */
2592 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2593 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2594 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2595 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2596 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2597
2598 dev_info(host->dev, "DW MMC controller at irq %d, "
2599 "%d bit host data width, "
2600 "%u deep fifo\n",
2601 host->irq, width, fifo_size);
2602
f95f3850
WN
2603 /* We need at least one slot to succeed */
2604 for (i = 0; i < host->num_slots; i++) {
2605 ret = dw_mci_init_slot(host, i);
1c2215b7
TA
2606 if (ret)
2607 dev_dbg(host->dev, "slot %d init failed\n", i);
2608 else
2609 init_slots++;
2610 }
2611
2612 if (init_slots) {
2613 dev_info(host->dev, "%d slots initialized\n", init_slots);
2614 } else {
2615 dev_dbg(host->dev, "attempted to initialize %d slots, "
2616 "but failed on all\n", host->num_slots);
780f22af 2617 goto err_workqueue;
f95f3850
WN
2618 }
2619
f95f3850 2620 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4a90920c 2621 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
f95f3850
WN
2622
2623 return 0;
2624
1791b13e 2625err_workqueue:
95dcc2cb 2626 destroy_workqueue(host->card_workqueue);
1791b13e 2627
f95f3850
WN
2628err_dmaunmap:
2629 if (host->use_dma && host->dma_ops->exit)
2630 host->dma_ops->exit(host);
f95f3850 2631
870556a3 2632err_regulator:
780f22af 2633 if (host->vmmc)
c07946a3 2634 regulator_disable(host->vmmc);
f90a0612
TA
2635
2636err_clk_ciu:
780f22af 2637 if (!IS_ERR(host->ciu_clk))
f90a0612 2638 clk_disable_unprepare(host->ciu_clk);
780f22af 2639
f90a0612 2640err_clk_biu:
780f22af 2641 if (!IS_ERR(host->biu_clk))
f90a0612 2642 clk_disable_unprepare(host->biu_clk);
780f22af 2643
f95f3850
WN
2644 return ret;
2645}
62ca8034 2646EXPORT_SYMBOL(dw_mci_probe);
f95f3850 2647
/*
 * Undo dw_mci_probe(): mask interrupts, unregister every slot, gate
 * the card clock, tear down the workqueue and DMA backend, then drop
 * the regulator and clocks (mirroring probe's acquisition order).
 */
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc)
		regulator_disable(host->vmmc);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
2680
2681
f95f3850 2682
6fe8890d 2683#ifdef CONFIG_PM_SLEEP
f95f3850
WN
2684/*
2685 * TODO: we should probably disable the clock to the card in the suspend path.
2686 */
62ca8034 2687int dw_mci_suspend(struct dw_mci *host)
f95f3850 2688{
62ca8034 2689 int i, ret = 0;
f95f3850
WN
2690
2691 for (i = 0; i < host->num_slots; i++) {
2692 struct dw_mci_slot *slot = host->slot[i];
2693 if (!slot)
2694 continue;
2695 ret = mmc_suspend_host(slot->mmc);
2696 if (ret < 0) {
2697 while (--i >= 0) {
2698 slot = host->slot[i];
2699 if (slot)
2700 mmc_resume_host(host->slot[i]->mmc);
2701 }
2702 return ret;
2703 }
2704 }
2705
c07946a3
JC
2706 if (host->vmmc)
2707 regulator_disable(host->vmmc);
2708
f95f3850
WN
2709 return 0;
2710}
62ca8034 2711EXPORT_SYMBOL(dw_mci_suspend);
f95f3850 2712
/*
 * Restore the controller after system suspend: re-enable the card
 * regulator, reset the IP, re-init DMA, restore FIFO/timeout/interrupt
 * registers, then resume each slot (re-programming bus settings for
 * slots that kept power). The register restore order mirrors probe.
 * Returns 0 or the first error encountered.
 */
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc) {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			dev_err(host->dev,
				"failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	/* Controller must come out of reset before any register writes. */
	if (!mci_wait_reset(host->dev, host)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/* Re-arm the same interrupt set that probe configured. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		/* Slots that kept power need their bus re-programmed. */
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}

		ret = mmc_resume_host(host->slot[i]->mmc);
		if (ret < 0)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
6fe8890d
JC
2766#endif /* CONFIG_PM_SLEEP */
2767
f95f3850
WN
/* Module init: the core library only announces itself; the actual
 * hosts are registered by the platform/PCI glue drivers. */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}
2773
/* Module exit: nothing to undo — glue drivers own the host lifetime. */
static void __exit dw_mci_exit(void)
{
}
2777
2778module_init(dw_mci_init);
2779module_exit(dw_mci_exit);
2780
2781MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2782MODULE_AUTHOR("NXP Semiconductor VietNam");
2783MODULE_AUTHOR("Imagination Technologies Ltd");
2784MODULE_LICENSE("GPL v2");