]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/mmc/host/dw_mmc.c
mmc: sdhci-pxav3: Use devm_* managed helpers
[mirror_ubuntu-bionic-kernel.git] / drivers / mmc / host / dw_mmc.c
CommitLineData
f95f3850
WN
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
f95f3850
WN
25#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/mmc.h>
90c2143a 32#include <linux/mmc/sdio.h>
f95f3850
WN
33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
c07946a3 35#include <linux/regulator/consumer.h>
1791b13e 36#include <linux/workqueue.h>
c91eab4b 37#include <linux/of.h>
55a6ceb2 38#include <linux/of_gpio.h>
bf626e55 39#include <linux/mmc/slot-gpio.h>
f95f3850
WN
40
41#include "dw_mmc.h"
42
43/* Common flag combinations */
3f7eec62 44#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
f95f3850
WN
45 SDMMC_INT_HTO | SDMMC_INT_SBE | \
46 SDMMC_INT_EBE)
47#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
48 SDMMC_INT_RESP_ERR)
49#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
50 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
51#define DW_MCI_SEND_STATUS 1
52#define DW_MCI_RECV_STATUS 2
53#define DW_MCI_DMA_THRESHOLD 16
54
1f44a2a5
SJ
55#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
56#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
57
f95f3850 58#ifdef CONFIG_MMC_DW_IDMAC
fc79a4d6
JS
59#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 SDMMC_IDMAC_INT_TI)
63
f95f3850
WN
64struct idmac_desc {
65 u32 des0; /* Control Descriptor */
66#define IDMAC_DES0_DIC BIT(1)
67#define IDMAC_DES0_LD BIT(2)
68#define IDMAC_DES0_FD BIT(3)
69#define IDMAC_DES0_CH BIT(4)
70#define IDMAC_DES0_ER BIT(5)
71#define IDMAC_DES0_CES BIT(30)
72#define IDMAC_DES0_OWN BIT(31)
73
74 u32 des1; /* Buffer sizes */
75#define IDMAC_SET_BUFFER1_SIZE(d, s) \
9b7bbe10 76 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
f95f3850
WN
77
78 u32 des2; /* buffer 1 physical address */
79
80 u32 des3; /* buffer 2 physical address */
81};
82#endif /* CONFIG_MMC_DW_IDMAC */
83
0976f16d
SJ
84static const u8 tuning_blk_pattern_4bit[] = {
85 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
86 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
87 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
88 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
89 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
90 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
91 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
92 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
93};
f95f3850 94
0976f16d
SJ
95static const u8 tuning_blk_pattern_8bit[] = {
96 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
97 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
98 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
99 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
100 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
101 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
102 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
103 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
104 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
105 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
106 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
107 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
108 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
109 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
110 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
111 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
f95f3850
WN
112};
113
31bff450
SJ
114static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
116
f95f3850
WN
117#if defined(CONFIG_DEBUG_FS)
118static int dw_mci_req_show(struct seq_file *s, void *v)
119{
120 struct dw_mci_slot *slot = s->private;
121 struct mmc_request *mrq;
122 struct mmc_command *cmd;
123 struct mmc_command *stop;
124 struct mmc_data *data;
125
126 /* Make sure we get a consistent snapshot */
127 spin_lock_bh(&slot->host->lock);
128 mrq = slot->mrq;
129
130 if (mrq) {
131 cmd = mrq->cmd;
132 data = mrq->data;
133 stop = mrq->stop;
134
135 if (cmd)
136 seq_printf(s,
137 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 cmd->opcode, cmd->arg, cmd->flags,
139 cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 cmd->resp[2], cmd->error);
141 if (data)
142 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 data->bytes_xfered, data->blocks,
144 data->blksz, data->flags, data->error);
145 if (stop)
146 seq_printf(s,
147 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 stop->opcode, stop->arg, stop->flags,
149 stop->resp[0], stop->resp[1], stop->resp[2],
150 stop->resp[2], stop->error);
151 }
152
153 spin_unlock_bh(&slot->host->lock);
154
155 return 0;
156}
157
158static int dw_mci_req_open(struct inode *inode, struct file *file)
159{
160 return single_open(file, dw_mci_req_show, inode->i_private);
161}
162
163static const struct file_operations dw_mci_req_fops = {
164 .owner = THIS_MODULE,
165 .open = dw_mci_req_open,
166 .read = seq_read,
167 .llseek = seq_lseek,
168 .release = single_release,
169};
170
171static int dw_mci_regs_show(struct seq_file *s, void *v)
172{
173 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
174 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
175 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
176 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
177 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
178 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
179
180 return 0;
181}
182
183static int dw_mci_regs_open(struct inode *inode, struct file *file)
184{
185 return single_open(file, dw_mci_regs_show, inode->i_private);
186}
187
188static const struct file_operations dw_mci_regs_fops = {
189 .owner = THIS_MODULE,
190 .open = dw_mci_regs_open,
191 .read = seq_read,
192 .llseek = seq_lseek,
193 .release = single_release,
194};
195
196static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
197{
198 struct mmc_host *mmc = slot->mmc;
199 struct dw_mci *host = slot->host;
200 struct dentry *root;
201 struct dentry *node;
202
203 root = mmc->debugfs_root;
204 if (!root)
205 return;
206
207 node = debugfs_create_file("regs", S_IRUSR, root, host,
208 &dw_mci_regs_fops);
209 if (!node)
210 goto err;
211
212 node = debugfs_create_file("req", S_IRUSR, root, slot,
213 &dw_mci_req_fops);
214 if (!node)
215 goto err;
216
217 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
218 if (!node)
219 goto err;
220
221 node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 (u32 *)&host->pending_events);
223 if (!node)
224 goto err;
225
226 node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 (u32 *)&host->completed_events);
228 if (!node)
229 goto err;
230
231 return;
232
233err:
234 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
235}
236#endif /* defined(CONFIG_DEBUG_FS) */
237
f95f3850
WN
238static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
239{
240 struct mmc_data *data;
800d78bf 241 struct dw_mci_slot *slot = mmc_priv(mmc);
e95baf13 242 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
f95f3850
WN
243 u32 cmdr;
244 cmd->error = -EINPROGRESS;
245
246 cmdr = cmd->opcode;
247
90c2143a
SJ
248 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
249 cmd->opcode == MMC_GO_IDLE_STATE ||
250 cmd->opcode == MMC_GO_INACTIVE_STATE ||
251 (cmd->opcode == SD_IO_RW_DIRECT &&
252 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
f95f3850 253 cmdr |= SDMMC_CMD_STOP;
4a1b27ad
JC
254 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
255 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
f95f3850
WN
256
257 if (cmd->flags & MMC_RSP_PRESENT) {
258 /* We expect a response, so set this bit */
259 cmdr |= SDMMC_CMD_RESP_EXP;
260 if (cmd->flags & MMC_RSP_136)
261 cmdr |= SDMMC_CMD_RESP_LONG;
262 }
263
264 if (cmd->flags & MMC_RSP_CRC)
265 cmdr |= SDMMC_CMD_RESP_CRC;
266
267 data = cmd->data;
268 if (data) {
269 cmdr |= SDMMC_CMD_DAT_EXP;
270 if (data->flags & MMC_DATA_STREAM)
271 cmdr |= SDMMC_CMD_STRM_MODE;
272 if (data->flags & MMC_DATA_WRITE)
273 cmdr |= SDMMC_CMD_DAT_WR;
274 }
275
cb27a843
JH
276 if (drv_data && drv_data->prepare_command)
277 drv_data->prepare_command(slot->host, &cmdr);
800d78bf 278
f95f3850
WN
279 return cmdr;
280}
281
90c2143a
SJ
282static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
283{
284 struct mmc_command *stop;
285 u32 cmdr;
286
287 if (!cmd->data)
288 return 0;
289
290 stop = &host->stop_abort;
291 cmdr = cmd->opcode;
292 memset(stop, 0, sizeof(struct mmc_command));
293
294 if (cmdr == MMC_READ_SINGLE_BLOCK ||
295 cmdr == MMC_READ_MULTIPLE_BLOCK ||
296 cmdr == MMC_WRITE_BLOCK ||
297 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
298 stop->opcode = MMC_STOP_TRANSMISSION;
299 stop->arg = 0;
300 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
301 } else if (cmdr == SD_IO_RW_EXTENDED) {
302 stop->opcode = SD_IO_RW_DIRECT;
303 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
304 ((cmd->arg >> 28) & 0x7);
305 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
306 } else {
307 return 0;
308 }
309
310 cmdr = stop->opcode | SDMMC_CMD_STOP |
311 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
312
313 return cmdr;
314}
315
f95f3850
WN
316static void dw_mci_start_command(struct dw_mci *host,
317 struct mmc_command *cmd, u32 cmd_flags)
318{
319 host->cmd = cmd;
4a90920c 320 dev_vdbg(host->dev,
f95f3850
WN
321 "start command: ARGR=0x%08x CMDR=0x%08x\n",
322 cmd->arg, cmd_flags);
323
324 mci_writel(host, CMDARG, cmd->arg);
325 wmb();
326
327 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
328}
329
90c2143a 330static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
f95f3850 331{
90c2143a
SJ
332 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
333 dw_mci_start_command(host, stop, host->stop_cmdr);
f95f3850
WN
334}
335
336/* DMA interface functions */
337static void dw_mci_stop_dma(struct dw_mci *host)
338{
03e8cb53 339 if (host->using_dma) {
f95f3850
WN
340 host->dma_ops->stop(host);
341 host->dma_ops->cleanup(host);
f95f3850 342 }
aa50f259
SJ
343
344 /* Data transfer was stopped by the interrupt handler */
345 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
f95f3850
WN
346}
347
9aa51408
SJ
348static int dw_mci_get_dma_dir(struct mmc_data *data)
349{
350 if (data->flags & MMC_DATA_WRITE)
351 return DMA_TO_DEVICE;
352 else
353 return DMA_FROM_DEVICE;
354}
355
9beee912 356#ifdef CONFIG_MMC_DW_IDMAC
f95f3850
WN
357static void dw_mci_dma_cleanup(struct dw_mci *host)
358{
359 struct mmc_data *data = host->data;
360
361 if (data)
9aa51408 362 if (!data->host_cookie)
4a90920c 363 dma_unmap_sg(host->dev,
9aa51408
SJ
364 data->sg,
365 data->sg_len,
366 dw_mci_get_dma_dir(data));
f95f3850
WN
367}
368
5ce9d961
SJ
369static void dw_mci_idmac_reset(struct dw_mci *host)
370{
371 u32 bmod = mci_readl(host, BMOD);
372 /* Software reset of DMA */
373 bmod |= SDMMC_IDMAC_SWRESET;
374 mci_writel(host, BMOD, bmod);
375}
376
f95f3850
WN
377static void dw_mci_idmac_stop_dma(struct dw_mci *host)
378{
379 u32 temp;
380
381 /* Disable and reset the IDMAC interface */
382 temp = mci_readl(host, CTRL);
383 temp &= ~SDMMC_CTRL_USE_IDMAC;
384 temp |= SDMMC_CTRL_DMA_RESET;
385 mci_writel(host, CTRL, temp);
386
387 /* Stop the IDMAC running */
388 temp = mci_readl(host, BMOD);
a5289a43 389 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
5ce9d961 390 temp |= SDMMC_IDMAC_SWRESET;
f95f3850
WN
391 mci_writel(host, BMOD, temp);
392}
393
394static void dw_mci_idmac_complete_dma(struct dw_mci *host)
395{
396 struct mmc_data *data = host->data;
397
4a90920c 398 dev_vdbg(host->dev, "DMA complete\n");
f95f3850
WN
399
400 host->dma_ops->cleanup(host);
401
402 /*
403 * If the card was removed, data will be NULL. No point in trying to
404 * send the stop command or waiting for NBUSY in this case.
405 */
406 if (data) {
407 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
408 tasklet_schedule(&host->tasklet);
409 }
410}
411
412static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
413 unsigned int sg_len)
414{
415 int i;
416 struct idmac_desc *desc = host->sg_cpu;
417
418 for (i = 0; i < sg_len; i++, desc++) {
419 unsigned int length = sg_dma_len(&data->sg[i]);
420 u32 mem_addr = sg_dma_address(&data->sg[i]);
421
422 /* Set the OWN bit and disable interrupts for this descriptor */
423 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
424
425 /* Buffer length */
426 IDMAC_SET_BUFFER1_SIZE(desc, length);
427
428 /* Physical address to DMA to/from */
429 desc->des2 = mem_addr;
430 }
431
432 /* Set first descriptor */
433 desc = host->sg_cpu;
434 desc->des0 |= IDMAC_DES0_FD;
435
436 /* Set last descriptor */
437 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
438 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
439 desc->des0 |= IDMAC_DES0_LD;
440
441 wmb();
442}
443
444static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
445{
446 u32 temp;
447
448 dw_mci_translate_sglist(host, host->data, sg_len);
449
450 /* Select IDMAC interface */
451 temp = mci_readl(host, CTRL);
452 temp |= SDMMC_CTRL_USE_IDMAC;
453 mci_writel(host, CTRL, temp);
454
455 wmb();
456
457 /* Enable the IDMAC */
458 temp = mci_readl(host, BMOD);
a5289a43 459 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
f95f3850
WN
460 mci_writel(host, BMOD, temp);
461
462 /* Start it running */
463 mci_writel(host, PLDMND, 1);
464}
465
466static int dw_mci_idmac_init(struct dw_mci *host)
467{
468 struct idmac_desc *p;
897b69e7 469 int i;
f95f3850
WN
470
471 /* Number of descriptors in the ring buffer */
472 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
473
474 /* Forward link the descriptor list */
475 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
476 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
477
478 /* Set the last descriptor as the end-of-ring descriptor */
479 p->des3 = host->sg_dma;
480 p->des0 = IDMAC_DES0_ER;
481
5ce9d961 482 dw_mci_idmac_reset(host);
141a712a 483
f95f3850 484 /* Mask out interrupts - get Tx & Rx complete only */
fc79a4d6 485 mci_writel(host, IDSTS, IDMAC_INT_CLR);
f95f3850
WN
486 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
487 SDMMC_IDMAC_INT_TI);
488
489 /* Set the descriptor base address */
490 mci_writel(host, DBADDR, host->sg_dma);
491 return 0;
492}
493
8e2b36ea 494static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
885c3e80
SJ
495 .init = dw_mci_idmac_init,
496 .start = dw_mci_idmac_start_dma,
497 .stop = dw_mci_idmac_stop_dma,
498 .complete = dw_mci_idmac_complete_dma,
499 .cleanup = dw_mci_dma_cleanup,
500};
501#endif /* CONFIG_MMC_DW_IDMAC */
502
9aa51408
SJ
503static int dw_mci_pre_dma_transfer(struct dw_mci *host,
504 struct mmc_data *data,
505 bool next)
f95f3850
WN
506{
507 struct scatterlist *sg;
9aa51408 508 unsigned int i, sg_len;
03e8cb53 509
9aa51408
SJ
510 if (!next && data->host_cookie)
511 return data->host_cookie;
f95f3850
WN
512
513 /*
514 * We don't do DMA on "complex" transfers, i.e. with
515 * non-word-aligned buffers or lengths. Also, we don't bother
516 * with all the DMA setup overhead for short transfers.
517 */
518 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
519 return -EINVAL;
9aa51408 520
f95f3850
WN
521 if (data->blksz & 3)
522 return -EINVAL;
523
524 for_each_sg(data->sg, sg, data->sg_len, i) {
525 if (sg->offset & 3 || sg->length & 3)
526 return -EINVAL;
527 }
528
4a90920c 529 sg_len = dma_map_sg(host->dev,
9aa51408
SJ
530 data->sg,
531 data->sg_len,
532 dw_mci_get_dma_dir(data));
533 if (sg_len == 0)
534 return -EINVAL;
03e8cb53 535
9aa51408
SJ
536 if (next)
537 data->host_cookie = sg_len;
f95f3850 538
9aa51408
SJ
539 return sg_len;
540}
541
9aa51408
SJ
542static void dw_mci_pre_req(struct mmc_host *mmc,
543 struct mmc_request *mrq,
544 bool is_first_req)
545{
546 struct dw_mci_slot *slot = mmc_priv(mmc);
547 struct mmc_data *data = mrq->data;
548
549 if (!slot->host->use_dma || !data)
550 return;
551
552 if (data->host_cookie) {
553 data->host_cookie = 0;
554 return;
555 }
556
557 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
558 data->host_cookie = 0;
559}
560
561static void dw_mci_post_req(struct mmc_host *mmc,
562 struct mmc_request *mrq,
563 int err)
564{
565 struct dw_mci_slot *slot = mmc_priv(mmc);
566 struct mmc_data *data = mrq->data;
567
568 if (!slot->host->use_dma || !data)
569 return;
570
571 if (data->host_cookie)
4a90920c 572 dma_unmap_sg(slot->host->dev,
9aa51408
SJ
573 data->sg,
574 data->sg_len,
575 dw_mci_get_dma_dir(data));
576 data->host_cookie = 0;
577}
578
52426899
SJ
/*
 * Pick MSIZE and RX/TX watermarks for the FIFOTH register based on
 * the block size, choosing the largest burst size that evenly divides
 * both the block depth and the TX watermark complement.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	/* largest burst size dividing both block depth and watermark */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
619
f1d2736c
SJ
620static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
621{
622 unsigned int blksz = data->blksz;
623 u32 blksz_depth, fifo_depth;
624 u16 thld_size;
625
626 WARN_ON(!(data->flags & MMC_DATA_READ));
627
628 if (host->timing != MMC_TIMING_MMC_HS200 &&
629 host->timing != MMC_TIMING_UHS_SDR104)
630 goto disable;
631
632 blksz_depth = blksz / (1 << host->data_shift);
633 fifo_depth = host->fifo_depth;
634
635 if (blksz_depth > fifo_depth)
636 goto disable;
637
638 /*
639 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
640 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
641 * Currently just choose blksz.
642 */
643 thld_size = blksz;
644 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
645 return;
646
647disable:
648 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
649}
650
9aa51408
SJ
651static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
652{
653 int sg_len;
654 u32 temp;
655
656 host->using_dma = 0;
657
658 /* If we don't have a channel, we can't do DMA */
659 if (!host->use_dma)
660 return -ENODEV;
661
662 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
a99aa9b9
SJ
663 if (sg_len < 0) {
664 host->dma_ops->stop(host);
9aa51408 665 return sg_len;
a99aa9b9 666 }
9aa51408
SJ
667
668 host->using_dma = 1;
f95f3850 669
4a90920c 670 dev_vdbg(host->dev,
f95f3850
WN
671 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
672 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
673 sg_len);
674
52426899
SJ
675 /*
676 * Decide the MSIZE and RX/TX Watermark.
677 * If current block size is same with previous size,
678 * no need to update fifoth.
679 */
680 if (host->prev_blksz != data->blksz)
681 dw_mci_adjust_fifoth(host, data);
682
f95f3850
WN
683 /* Enable the DMA interface */
684 temp = mci_readl(host, CTRL);
685 temp |= SDMMC_CTRL_DMA_ENABLE;
686 mci_writel(host, CTRL, temp);
687
688 /* Disable RX/TX IRQs, let DMA handle it */
689 temp = mci_readl(host, INTMASK);
690 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
691 mci_writel(host, INTMASK, temp);
692
693 host->dma_ops->start(host, sg_len);
694
695 return 0;
696}
697
698static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
699{
700 u32 temp;
701
702 data->error = -EINPROGRESS;
703
704 WARN_ON(host->data);
705 host->sg = NULL;
706 host->data = data;
707
f1d2736c 708 if (data->flags & MMC_DATA_READ) {
55c5efbc 709 host->dir_status = DW_MCI_RECV_STATUS;
f1d2736c
SJ
710 dw_mci_ctrl_rd_thld(host, data);
711 } else {
55c5efbc 712 host->dir_status = DW_MCI_SEND_STATUS;
f1d2736c 713 }
55c5efbc 714
f95f3850 715 if (dw_mci_submit_data_dma(host, data)) {
f9c2a0dc
SJ
716 int flags = SG_MITER_ATOMIC;
717 if (host->data->flags & MMC_DATA_READ)
718 flags |= SG_MITER_TO_SG;
719 else
720 flags |= SG_MITER_FROM_SG;
721
722 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
f95f3850 723 host->sg = data->sg;
34b664a2
JH
724 host->part_buf_start = 0;
725 host->part_buf_count = 0;
f95f3850 726
b40af3aa 727 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
f95f3850
WN
728 temp = mci_readl(host, INTMASK);
729 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
730 mci_writel(host, INTMASK, temp);
731
732 temp = mci_readl(host, CTRL);
733 temp &= ~SDMMC_CTRL_DMA_ENABLE;
734 mci_writel(host, CTRL, temp);
52426899
SJ
735
736 /*
737 * Use the initial fifoth_val for PIO mode.
738 * If next issued data may be transfered by DMA mode,
739 * prev_blksz should be invalidated.
740 */
741 mci_writel(host, FIFOTH, host->fifoth_val);
742 host->prev_blksz = 0;
743 } else {
744 /*
745 * Keep the current block size.
746 * It will be used to decide whether to update
747 * fifoth register next time.
748 */
749 host->prev_blksz = data->blksz;
f95f3850
WN
750 }
751}
752
753static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
754{
755 struct dw_mci *host = slot->host;
756 unsigned long timeout = jiffies + msecs_to_jiffies(500);
757 unsigned int cmd_status = 0;
758
759 mci_writel(host, CMDARG, arg);
760 wmb();
761 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
762
763 while (time_before(jiffies, timeout)) {
764 cmd_status = mci_readl(host, CMD);
765 if (!(cmd_status & SDMMC_CMD_START))
766 return;
767 }
768 dev_err(&slot->mmc->class_dev,
769 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
770 cmd, arg, cmd_status);
771}
772
ab269128 773static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
f95f3850
WN
774{
775 struct dw_mci *host = slot->host;
fdf492a1 776 unsigned int clock = slot->clock;
f95f3850 777 u32 div;
9623b5b9 778 u32 clk_en_a;
f95f3850 779
fdf492a1
DA
780 if (!clock) {
781 mci_writel(host, CLKENA, 0);
782 mci_send_cmd(slot,
783 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
784 } else if (clock != host->current_speed || force_clkinit) {
785 div = host->bus_hz / clock;
786 if (host->bus_hz % clock && host->bus_hz > clock)
f95f3850
WN
787 /*
788 * move the + 1 after the divide to prevent
789 * over-clocking the card.
790 */
e419990b
SJ
791 div += 1;
792
fdf492a1 793 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
f95f3850 794
fdf492a1
DA
795 if ((clock << div) != slot->__clk_old || force_clkinit)
796 dev_info(&slot->mmc->class_dev,
797 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
798 slot->id, host->bus_hz, clock,
799 div ? ((host->bus_hz / div) >> 1) :
800 host->bus_hz, div);
f95f3850
WN
801
802 /* disable clock */
803 mci_writel(host, CLKENA, 0);
804 mci_writel(host, CLKSRC, 0);
805
806 /* inform CIU */
807 mci_send_cmd(slot,
808 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
809
810 /* set clock to desired speed */
811 mci_writel(host, CLKDIV, div);
812
813 /* inform CIU */
814 mci_send_cmd(slot,
815 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
816
9623b5b9
DA
817 /* enable clock; only low power if no SDIO */
818 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
819 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
820 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
821 mci_writel(host, CLKENA, clk_en_a);
f95f3850
WN
822
823 /* inform CIU */
824 mci_send_cmd(slot,
825 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
826
fdf492a1
DA
827 /* keep the clock with reflecting clock dividor */
828 slot->__clk_old = clock << div;
f95f3850
WN
829 }
830
fdf492a1
DA
831 host->current_speed = clock;
832
f95f3850 833 /* Set the current slot bus width */
1d56c453 834 mci_writel(host, CTYPE, (slot->ctype << slot->id));
f95f3850
WN
835}
836
053b3ce6
SJ
837static void __dw_mci_start_request(struct dw_mci *host,
838 struct dw_mci_slot *slot,
839 struct mmc_command *cmd)
f95f3850
WN
840{
841 struct mmc_request *mrq;
f95f3850
WN
842 struct mmc_data *data;
843 u32 cmdflags;
844
845 mrq = slot->mrq;
f95f3850 846
f95f3850
WN
847 host->cur_slot = slot;
848 host->mrq = mrq;
849
850 host->pending_events = 0;
851 host->completed_events = 0;
e352c813 852 host->cmd_status = 0;
f95f3850 853 host->data_status = 0;
e352c813 854 host->dir_status = 0;
f95f3850 855
053b3ce6 856 data = cmd->data;
f95f3850 857 if (data) {
f16afa88 858 mci_writel(host, TMOUT, 0xFFFFFFFF);
f95f3850
WN
859 mci_writel(host, BYTCNT, data->blksz*data->blocks);
860 mci_writel(host, BLKSIZ, data->blksz);
861 }
862
f95f3850
WN
863 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
864
865 /* this is the first command, send the initialization clock */
866 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
867 cmdflags |= SDMMC_CMD_INIT;
868
869 if (data) {
870 dw_mci_submit_data(host, data);
871 wmb();
872 }
873
874 dw_mci_start_command(host, cmd, cmdflags);
875
876 if (mrq->stop)
877 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
90c2143a
SJ
878 else
879 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
f95f3850
WN
880}
881
053b3ce6
SJ
882static void dw_mci_start_request(struct dw_mci *host,
883 struct dw_mci_slot *slot)
884{
885 struct mmc_request *mrq = slot->mrq;
886 struct mmc_command *cmd;
887
888 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
889 __dw_mci_start_request(host, slot, cmd);
890}
891
7456caae 892/* must be called with host->lock held */
f95f3850
WN
893static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
894 struct mmc_request *mrq)
895{
896 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
897 host->state);
898
f95f3850
WN
899 slot->mrq = mrq;
900
901 if (host->state == STATE_IDLE) {
902 host->state = STATE_SENDING_CMD;
903 dw_mci_start_request(host, slot);
904 } else {
905 list_add_tail(&slot->queue_node, &host->queue);
906 }
f95f3850
WN
907}
908
909static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
910{
911 struct dw_mci_slot *slot = mmc_priv(mmc);
912 struct dw_mci *host = slot->host;
913
914 WARN_ON(slot->mrq);
915
7456caae
JH
916 /*
917 * The check for card presence and queueing of the request must be
918 * atomic, otherwise the card could be removed in between and the
919 * request wouldn't fail until another card was inserted.
920 */
921 spin_lock_bh(&host->lock);
922
f95f3850 923 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
7456caae 924 spin_unlock_bh(&host->lock);
f95f3850
WN
925 mrq->cmd->error = -ENOMEDIUM;
926 mmc_request_done(mmc, mrq);
927 return;
928 }
929
f95f3850 930 dw_mci_queue_request(host, slot, mrq);
7456caae
JH
931
932 spin_unlock_bh(&host->lock);
f95f3850
WN
933}
934
935static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
936{
937 struct dw_mci_slot *slot = mmc_priv(mmc);
e95baf13 938 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
41babf75 939 u32 regs;
f95f3850 940
f95f3850 941 switch (ios->bus_width) {
f95f3850
WN
942 case MMC_BUS_WIDTH_4:
943 slot->ctype = SDMMC_CTYPE_4BIT;
944 break;
c9b2a06f
JC
945 case MMC_BUS_WIDTH_8:
946 slot->ctype = SDMMC_CTYPE_8BIT;
947 break;
b2f7cb45
JC
948 default:
949 /* set default 1 bit mode */
950 slot->ctype = SDMMC_CTYPE_1BIT;
f95f3850
WN
951 }
952
3f514291
SJ
953 regs = mci_readl(slot->host, UHS_REG);
954
41babf75 955 /* DDR mode set */
cab3a802 956 if (ios->timing == MMC_TIMING_MMC_DDR52)
c69042a5 957 regs |= ((0x1 << slot->id) << 16);
3f514291 958 else
c69042a5 959 regs &= ~((0x1 << slot->id) << 16);
3f514291
SJ
960
961 mci_writel(slot->host, UHS_REG, regs);
f1d2736c 962 slot->host->timing = ios->timing;
41babf75 963
fdf492a1
DA
964 /*
965 * Use mirror of ios->clock to prevent race with mmc
966 * core ios update when finding the minimum.
967 */
968 slot->clock = ios->clock;
f95f3850 969
cb27a843
JH
970 if (drv_data && drv_data->set_ios)
971 drv_data->set_ios(slot->host, ios);
800d78bf 972
bf7cb224
JC
973 /* Slot specific timing and width adjustment */
974 dw_mci_setup_bus(slot, false);
975
f95f3850
WN
976 switch (ios->power_mode) {
977 case MMC_POWER_UP:
978 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
4366dcc5
JC
979 regs = mci_readl(slot->host, PWREN);
980 regs |= (1 << slot->id);
981 mci_writel(slot->host, PWREN, regs);
e6f34e2f
JH
982 break;
983 case MMC_POWER_OFF:
4366dcc5
JC
984 regs = mci_readl(slot->host, PWREN);
985 regs &= ~(1 << slot->id);
986 mci_writel(slot->host, PWREN, regs);
f95f3850
WN
987 break;
988 default:
989 break;
990 }
991}
992
993static int dw_mci_get_ro(struct mmc_host *mmc)
994{
995 int read_only;
996 struct dw_mci_slot *slot = mmc_priv(mmc);
9795a846 997 int gpio_ro = mmc_gpio_get_ro(mmc);
f95f3850
WN
998
999 /* Use platform get_ro function, else try on board write protect */
9640639b 1000 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
b4967aa5 1001 read_only = 0;
9795a846
JC
1002 else if (!IS_ERR_VALUE(gpio_ro))
1003 read_only = gpio_ro;
f95f3850
WN
1004 else
1005 read_only =
1006 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1007
1008 dev_dbg(&mmc->class_dev, "card is %s\n",
1009 read_only ? "read-only" : "read-write");
1010
1011 return read_only;
1012}
1013
1014static int dw_mci_get_cd(struct mmc_host *mmc)
1015{
1016 int present;
1017 struct dw_mci_slot *slot = mmc_priv(mmc);
1018 struct dw_mci_board *brd = slot->host->pdata;
7cf347bd
ZG
1019 struct dw_mci *host = slot->host;
1020 int gpio_cd = mmc_gpio_get_cd(mmc);
f95f3850
WN
1021
1022 /* Use platform get_cd function, else try onboard card detect */
fc3d7720
JC
1023 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1024 present = 1;
bf626e55 1025 else if (!IS_ERR_VALUE(gpio_cd))
7cf347bd 1026 present = gpio_cd;
f95f3850
WN
1027 else
1028 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1029 == 0 ? 1 : 0;
1030
7cf347bd 1031 spin_lock_bh(&host->lock);
bf626e55
ZG
1032 if (present) {
1033 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
f95f3850 1034 dev_dbg(&mmc->class_dev, "card is present\n");
bf626e55
ZG
1035 } else {
1036 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
f95f3850 1037 dev_dbg(&mmc->class_dev, "card is not present\n");
bf626e55 1038 }
7cf347bd 1039 spin_unlock_bh(&host->lock);
f95f3850
WN
1040
1041 return present;
1042}
1043
9623b5b9
DA
1044/*
1045 * Disable lower power mode.
1046 *
1047 * Low power mode will stop the card clock when idle. According to the
1048 * description of the CLKENA register we should disable low power mode
1049 * for SDIO cards if we need SDIO interrupts to work.
1050 *
1051 * This function is fast if low power mode is already disabled.
1052 */
1053static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1054{
1055 struct dw_mci *host = slot->host;
1056 u32 clk_en_a;
1057 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1058
1059 clk_en_a = mci_readl(host, CLKENA);
1060
1061 if (clk_en_a & clken_low_pwr) {
1062 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1063 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1064 SDMMC_CMD_PRV_DAT_WAIT, 0);
1065 }
1066}
1067
1a5c8e1f
SH
1068static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1069{
1070 struct dw_mci_slot *slot = mmc_priv(mmc);
1071 struct dw_mci *host = slot->host;
1072 u32 int_mask;
1073
1074 /* Enable/disable Slot Specific SDIO interrupt */
1075 int_mask = mci_readl(host, INTMASK);
1076 if (enb) {
9623b5b9
DA
1077 /*
1078 * Turn off low power mode if it was enabled. This is a bit of
1079 * a heavy operation and we disable / enable IRQs a lot, so
1080 * we'll leave low power mode disabled and it will get
1081 * re-enabled again in dw_mci_setup_bus().
1082 */
1083 dw_mci_disable_low_power(slot);
1084
1a5c8e1f 1085 mci_writel(host, INTMASK,
705ad047 1086 (int_mask | SDMMC_INT_SDIO(slot->id)));
1a5c8e1f
SH
1087 } else {
1088 mci_writel(host, INTMASK,
705ad047 1089 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1a5c8e1f
SH
1090 }
1091}
1092
0976f16d
SJ
1093static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1094{
1095 struct dw_mci_slot *slot = mmc_priv(mmc);
1096 struct dw_mci *host = slot->host;
1097 const struct dw_mci_drv_data *drv_data = host->drv_data;
1098 struct dw_mci_tuning_data tuning_data;
1099 int err = -ENOSYS;
1100
1101 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1102 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1103 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1104 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1105 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1106 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1107 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1108 } else {
1109 return -EINVAL;
1110 }
1111 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1112 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1113 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1114 } else {
1115 dev_err(host->dev,
1116 "Undefined command(%d) for tuning\n", opcode);
1117 return -EINVAL;
1118 }
1119
1120 if (drv_data && drv_data->execute_tuning)
1121 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1122 return err;
1123}
1124
/* Host operations handed to the MMC core; one shared instance for all slots. */
static const struct mmc_host_ops dw_mci_ops = {
	.request = dw_mci_request,
	.pre_req = dw_mci_pre_req,
	.post_req = dw_mci_post_req,
	.set_ios = dw_mci_set_ios,
	.get_ro = dw_mci_get_ro,
	.get_cd = dw_mci_get_cd,
	.enable_sdio_irq = dw_mci_enable_sdio_irq,
	.execute_tuning = dw_mci_execute_tuning,
};
1135
/*
 * Finish the current request and start the next queued one, if any.
 *
 * Called with host->lock held.  The lock is dropped around
 * mmc_request_done() (see the __releases/__acquires annotations)
 * because the MMC core may immediately submit a new request from
 * that callback.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	/* Both must have been consumed by the state machine by now */
	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot is waiting: dequeue it and start its request */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	/* Notify the core outside the lock; it may queue the next request */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1164
e352c813 1165static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
f95f3850
WN
1166{
1167 u32 status = host->cmd_status;
1168
1169 host->cmd_status = 0;
1170
1171 /* Read the response from the card (up to 16 bytes) */
1172 if (cmd->flags & MMC_RSP_PRESENT) {
1173 if (cmd->flags & MMC_RSP_136) {
1174 cmd->resp[3] = mci_readl(host, RESP0);
1175 cmd->resp[2] = mci_readl(host, RESP1);
1176 cmd->resp[1] = mci_readl(host, RESP2);
1177 cmd->resp[0] = mci_readl(host, RESP3);
1178 } else {
1179 cmd->resp[0] = mci_readl(host, RESP0);
1180 cmd->resp[1] = 0;
1181 cmd->resp[2] = 0;
1182 cmd->resp[3] = 0;
1183 }
1184 }
1185
1186 if (status & SDMMC_INT_RTO)
1187 cmd->error = -ETIMEDOUT;
1188 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1189 cmd->error = -EILSEQ;
1190 else if (status & SDMMC_INT_RESP_ERR)
1191 cmd->error = -EIO;
1192 else
1193 cmd->error = 0;
1194
1195 if (cmd->error) {
1196 /* newer ip versions need a delay between retries */
1197 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1198 mdelay(20);
f95f3850 1199 }
e352c813
SJ
1200
1201 return cmd->error;
1202}
1203
/*
 * Finish a data transfer: map the latched data interrupt status to an
 * errno in data->error and account bytes_xfered.  On any error the FIFO
 * is reset to flush stale data.  Returns data->error.
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			/* End-bit error: meaning depends on transfer direction */
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_fifo_reset(host);
	} else {
		/* Clean completion: the whole request was transferred */
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
1246
/*
 * Bottom-half state machine driving a request through its phases:
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP, with
 * DATA_ERROR as an error detour.  Events latched by the IRQ handler
 * in host->pending_events advance the machine; the loop re-evaluates
 * until no state transition occurs.  Runs under host->lock.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now issue the actual data command */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/* Command failed but data was set up: abort it */
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					/* Pre-defined transfer: no stop needed */
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_fifo_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the aborted transfer to drain before stop */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1383
34b664a2
JH
/* push final bytes to part_buf, only use during push */
/* part_buf is a union (part_buf16/32/64) overlaying the same storage. */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}
f95f3850 1390
34b664a2
JH
/* append bytes to part_buf, only use during push */
/* Returns how many bytes were consumed; capped at one FIFO word. */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	/* (1 << data_shift) is the FIFO word width in bytes */
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}
f95f3850 1399
34b664a2
JH
/* pull first bytes from part_buf, only use during pull */
/* Returns the number of buffered bytes copied into buf (may be 0). */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}
1412
34b664a2
JH
/* pull final bytes from the part_buf, assuming it's just been filled */
/* The unread remainder of the FIFO word stays buffered for the next pull. */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
f95f3850 1420
34b664a2
JH
/*
 * PIO push for a 16-bit FIFO: write cnt bytes from buf, buffering any
 * trailing odd byte in part_buf until the next call (or flushing it when
 * the transfer's expected length has been reached).
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Odd-aligned source: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
	}
}
f95f3850 1471
34b664a2
JH
/*
 * PIO pull for a 16-bit FIFO: read cnt bytes into buf; a trailing odd
 * byte is read as a full FIFO word and its remainder kept in part_buf.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Odd-aligned destination: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1503
/*
 * PIO push for a 32-bit FIFO: write cnt bytes from buf, buffering up to
 * 3 trailing bytes in part_buf until the next call (or flushing them
 * when the transfer's expected length has been reached).
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned source: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
	}
}
1554
/*
 * PIO pull for a 32-bit FIFO: read cnt bytes into buf; up to 3 trailing
 * bytes are read as a full FIFO word and the remainder kept in part_buf.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned destination: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1586
/*
 * PIO push for a 64-bit FIFO: write cnt bytes from buf, buffering up to
 * 7 trailing bytes in part_buf until the next call (or flushing them
 * when the transfer's expected length has been reached).
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned source: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
	}
}
1638
/*
 * PIO pull for a 64-bit FIFO: read cnt bytes into buf; up to 7 trailing
 * bytes are read as a full FIFO word and the remainder kept in part_buf.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned destination: bounce through an aligned buffer */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
f95f3850 1670
34b664a2
JH
1671static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1672{
1673 int len;
f95f3850 1674
34b664a2
JH
1675 /* get remaining partial bytes */
1676 len = dw_mci_pull_part_bytes(host, buf, cnt);
1677 if (unlikely(len == cnt))
1678 return;
1679 buf += len;
1680 cnt -= len;
1681
1682 /* get the rest of the data */
1683 host->pull_data(host, buf, cnt);
f95f3850
WN
1684}
1685
/*
 * Drain the receive FIFO into the request's scatterlist (PIO mode).
 *
 * @dto: true when called from the data-over interrupt, in which case we
 *       keep reading while the FIFO still reports a non-zero fill count.
 *
 * Walks the scatterlist with an sg_mapping_iter; when the list is
 * exhausted, signals EVENT_XFER_COMPLETE to the tasklet.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* Bytes currently available: FIFO fill + buffered part */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* Current segment fully consumed: peek at the next one */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* Publish host->sg = NULL before the completion bit is observed */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1739
/*
 * Fill the transmit FIFO from the request's scatterlist (PIO mode).
 *
 * Mirror of dw_mci_read_data_pio(): pushes as many bytes as the FIFO
 * has free space for, and signals EVENT_XFER_COMPLETE once the
 * scatterlist is exhausted.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* Free FIFO space in bytes, minus bytes already buffered */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* Current segment fully consumed: peek at the next one */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* Publish host->sg = NULL before the completion bit is observed */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1793
/*
 * Latch a command-done interrupt status and kick the tasklet.
 * An earlier (error) status takes precedence and is not overwritten.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* Make cmd_status visible before the event bit is observed */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1804
/*
 * Top-half interrupt handler: acknowledge each pending source in
 * RINTSTS, latch status for the tasklet, service PIO FIFO thresholds
 * inline, and forward card-detect / SDIO / internal-DMA events.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			/* Order status store before the event bit */
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* Drain whatever is left in the FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			/* Card insert/remove: defer to the card workqueue */
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
1898
/*
 * Workqueue handler for card insert/remove events.
 *
 * For each slot, loops until the observed card state matches the last
 * recorded state (the card may bounce while we work).  On a change it
 * fails any in-flight or queued request with -ENOMEDIUM, resets the
 * FIFO (and IDMAC) on removal, and finally tells the MMC core to
 * rescan the slot.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* Request is actively being processed */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* Request was still queued: fail it directly */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				/* Clear down the FIFO */
				dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
				dw_mci_idmac_reset(host);
#endif

			}

			spin_unlock_bh(&host->lock);

			/* Re-sample: the card may have bounced meanwhile */
			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
1984
c91eab4b
TA
1985#ifdef CONFIG_OF
1986/* given a slot id, find out the device node representing that slot */
1987static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1988{
1989 struct device_node *np;
1990 const __be32 *addr;
1991 int len;
1992
1993 if (!dev || !dev->of_node)
1994 return NULL;
1995
1996 for_each_child_of_node(dev->of_node, np) {
1997 addr = of_get_property(np, "reg", &len);
1998 if (!addr || (len < sizeof(int)))
1999 continue;
2000 if (be32_to_cpup(addr) == slot)
2001 return np;
2002 }
2003 return NULL;
2004}
2005
a70aaa64
DA
/* Mapping from device-tree quirk property names to DW_MCI_SLOT_QUIRK_* bits. */
static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
2015
2016static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2017{
2018 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2019 int quirks = 0;
2020 int idx;
2021
2022 /* get quirks */
2023 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2024 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2025 quirks |= of_slot_quirks[idx].id;
2026
2027 return quirks;
2028}
c91eab4b 2029#else /* CONFIG_OF */
a70aaa64
DA
/* !CONFIG_OF stub: without a device tree there are no per-slot quirks. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
c91eab4b
TA
/* !CONFIG_OF stub: there is never a matching device-tree node. */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
2038#endif /* CONFIG_OF */
2039
/*
 * Allocate and register the mmc_host for one controller slot.
 *
 * Pulls capabilities and clock limits from the device tree and/or
 * platform data, applies controller-specific caps via drv_data,
 * configures block-layer limits (IDMAC vs PIO), samples initial card
 * presence and registers the host with the MMC core.
 *
 * Returns 0 on success, -ENOMEM if the host can't be allocated,
 * -EINVAL if mmc_add_host() fails.
 */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
	/* DT may constrain the card clock range; otherwise use defaults */
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	/* Controller index selects the per-controller caps entry below */
	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	mmc_of_parse(mmc);

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	/* Seed the PRESENT flag from the current card-detect state */
	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_setup_bus;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	return 0;

err_setup_bus:
	mmc_free_host(mmc);
	return -EINVAL;
}
2138
2139static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2140{
f95f3850
WN
2141 /* Debugfs stuff is cleaned up by mmc core */
2142 mmc_remove_host(slot->mmc);
2143 slot->host->slot[id] = NULL;
2144 mmc_free_host(slot->mmc);
2145}
2146
2147static void dw_mci_init_dma(struct dw_mci *host)
2148{
2149 /* Alloc memory for sg translation */
780f22af 2150 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
f95f3850
WN
2151 &host->sg_dma, GFP_KERNEL);
2152 if (!host->sg_cpu) {
4a90920c 2153 dev_err(host->dev, "%s: could not alloc DMA memory\n",
f95f3850
WN
2154 __func__);
2155 goto no_dma;
2156 }
2157
2158 /* Determine which DMA interface to use */
2159#ifdef CONFIG_MMC_DW_IDMAC
2160 host->dma_ops = &dw_mci_idmac_ops;
00956ea3 2161 dev_info(host->dev, "Using internal DMA controller.\n");
f95f3850
WN
2162#endif
2163
2164 if (!host->dma_ops)
2165 goto no_dma;
2166
e1631f98
JC
2167 if (host->dma_ops->init && host->dma_ops->start &&
2168 host->dma_ops->stop && host->dma_ops->cleanup) {
f95f3850 2169 if (host->dma_ops->init(host)) {
4a90920c 2170 dev_err(host->dev, "%s: Unable to initialize "
f95f3850
WN
2171 "DMA Controller.\n", __func__);
2172 goto no_dma;
2173 }
2174 } else {
4a90920c 2175 dev_err(host->dev, "DMA initialization not found.\n");
f95f3850
WN
2176 goto no_dma;
2177 }
2178
2179 host->use_dma = 1;
2180 return;
2181
2182no_dma:
4a90920c 2183 dev_info(host->dev, "Using PIO mode.\n");
f95f3850
WN
2184 host->use_dma = 0;
2185 return;
2186}
2187
31bff450 2188static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
f95f3850
WN
2189{
2190 unsigned long timeout = jiffies + msecs_to_jiffies(500);
31bff450 2191 u32 ctrl;
f95f3850 2192
31bff450
SJ
2193 ctrl = mci_readl(host, CTRL);
2194 ctrl |= reset;
2195 mci_writel(host, CTRL, ctrl);
f95f3850
WN
2196
2197 /* wait till resets clear */
2198 do {
2199 ctrl = mci_readl(host, CTRL);
31bff450 2200 if (!(ctrl & reset))
f95f3850
WN
2201 return true;
2202 } while (time_before(jiffies, timeout));
2203
31bff450
SJ
2204 dev_err(host->dev,
2205 "Timeout resetting block (ctrl reset %#x)\n",
2206 ctrl & reset);
f95f3850
WN
2207
2208 return false;
2209}
2210
31bff450
SJ
2211static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2212{
2213 /*
2214 * Reseting generates a block interrupt, hence setting
2215 * the scatter-gather pointer to NULL.
2216 */
2217 if (host->sg) {
2218 sg_miter_stop(&host->sg_miter);
2219 host->sg = NULL;
2220 }
2221
2222 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2223}
2224
2225static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2226{
2227 return dw_mci_ctrl_reset(host,
2228 SDMMC_CTRL_FIFO_RESET |
2229 SDMMC_CTRL_RESET |
2230 SDMMC_CTRL_DMA_RESET);
2231}
2232
c91eab4b
TA
2233#ifdef CONFIG_OF
2234static struct dw_mci_of_quirks {
2235 char *quirk;
2236 int id;
2237} of_quirks[] = {
2238 {
c91eab4b
TA
2239 .quirk = "broken-cd",
2240 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2241 },
2242};
2243
2244static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2245{
2246 struct dw_mci_board *pdata;
2247 struct device *dev = host->dev;
2248 struct device_node *np = dev->of_node;
e95baf13 2249 const struct dw_mci_drv_data *drv_data = host->drv_data;
800d78bf 2250 int idx, ret;
3c6d89ea 2251 u32 clock_frequency;
c91eab4b
TA
2252
2253 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2254 if (!pdata) {
2255 dev_err(dev, "could not allocate memory for pdata\n");
2256 return ERR_PTR(-ENOMEM);
2257 }
2258
2259 /* find out number of slots supported */
2260 if (of_property_read_u32(dev->of_node, "num-slots",
2261 &pdata->num_slots)) {
2262 dev_info(dev, "num-slots property not found, "
2263 "assuming 1 slot is available\n");
2264 pdata->num_slots = 1;
2265 }
2266
2267 /* get quirks */
2268 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2269 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2270 pdata->quirks |= of_quirks[idx].id;
2271
2272 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2273 dev_info(dev, "fifo-depth property not found, using "
2274 "value of FIFOTH register as default\n");
2275
2276 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2277
3c6d89ea
DA
2278 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2279 pdata->bus_hz = clock_frequency;
2280
cb27a843
JH
2281 if (drv_data && drv_data->parse_dt) {
2282 ret = drv_data->parse_dt(host);
800d78bf
TA
2283 if (ret)
2284 return ERR_PTR(ret);
2285 }
2286
10b49841
SJ
2287 if (of_find_property(np, "supports-highspeed", NULL))
2288 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2289
c91eab4b
TA
2290 return pdata;
2291}
2292
2293#else /* CONFIG_OF */
2294static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2295{
2296 return ERR_PTR(-EINVAL);
2297}
2298#endif /* CONFIG_OF */
2299
62ca8034 2300int dw_mci_probe(struct dw_mci *host)
f95f3850 2301{
e95baf13 2302 const struct dw_mci_drv_data *drv_data = host->drv_data;
62ca8034 2303 int width, i, ret = 0;
f95f3850 2304 u32 fifo_size;
1c2215b7 2305 int init_slots = 0;
f95f3850 2306
c91eab4b
TA
2307 if (!host->pdata) {
2308 host->pdata = dw_mci_parse_dt(host);
2309 if (IS_ERR(host->pdata)) {
2310 dev_err(host->dev, "platform data not available\n");
2311 return -EINVAL;
2312 }
f95f3850
WN
2313 }
2314
907abd51 2315 if (host->pdata->num_slots > 1) {
4a90920c 2316 dev_err(host->dev,
907abd51 2317 "Platform data must supply num_slots.\n");
62ca8034 2318 return -ENODEV;
f95f3850
WN
2319 }
2320
780f22af 2321 host->biu_clk = devm_clk_get(host->dev, "biu");
f90a0612
TA
2322 if (IS_ERR(host->biu_clk)) {
2323 dev_dbg(host->dev, "biu clock not available\n");
2324 } else {
2325 ret = clk_prepare_enable(host->biu_clk);
2326 if (ret) {
2327 dev_err(host->dev, "failed to enable biu clock\n");
f90a0612
TA
2328 return ret;
2329 }
2330 }
2331
780f22af 2332 host->ciu_clk = devm_clk_get(host->dev, "ciu");
f90a0612
TA
2333 if (IS_ERR(host->ciu_clk)) {
2334 dev_dbg(host->dev, "ciu clock not available\n");
3c6d89ea 2335 host->bus_hz = host->pdata->bus_hz;
f90a0612
TA
2336 } else {
2337 ret = clk_prepare_enable(host->ciu_clk);
2338 if (ret) {
2339 dev_err(host->dev, "failed to enable ciu clock\n");
f90a0612
TA
2340 goto err_clk_biu;
2341 }
f90a0612 2342
3c6d89ea
DA
2343 if (host->pdata->bus_hz) {
2344 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2345 if (ret)
2346 dev_warn(host->dev,
612de4c1 2347 "Unable to set bus rate to %uHz\n",
3c6d89ea
DA
2348 host->pdata->bus_hz);
2349 }
f90a0612 2350 host->bus_hz = clk_get_rate(host->ciu_clk);
3c6d89ea 2351 }
f90a0612 2352
612de4c1
JC
2353 if (!host->bus_hz) {
2354 dev_err(host->dev,
2355 "Platform data must supply bus speed\n");
2356 ret = -ENODEV;
2357 goto err_clk_ciu;
2358 }
2359
002f0d5c
YK
2360 if (drv_data && drv_data->init) {
2361 ret = drv_data->init(host);
2362 if (ret) {
2363 dev_err(host->dev,
2364 "implementation specific init failed\n");
2365 goto err_clk_ciu;
2366 }
2367 }
2368
cb27a843
JH
2369 if (drv_data && drv_data->setup_clock) {
2370 ret = drv_data->setup_clock(host);
800d78bf
TA
2371 if (ret) {
2372 dev_err(host->dev,
2373 "implementation specific clock setup failed\n");
2374 goto err_clk_ciu;
2375 }
2376 }
2377
a55d6ff0 2378 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
870556a3
DA
2379 if (IS_ERR(host->vmmc)) {
2380 ret = PTR_ERR(host->vmmc);
2381 if (ret == -EPROBE_DEFER)
2382 goto err_clk_ciu;
2383
2384 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2385 host->vmmc = NULL;
2386 } else {
2387 ret = regulator_enable(host->vmmc);
2388 if (ret) {
2389 if (ret != -EPROBE_DEFER)
2390 dev_err(host->dev,
2391 "regulator_enable fail: %d\n", ret);
2392 goto err_clk_ciu;
2393 }
2394 }
2395
62ca8034 2396 host->quirks = host->pdata->quirks;
f95f3850
WN
2397
2398 spin_lock_init(&host->lock);
2399 INIT_LIST_HEAD(&host->queue);
2400
f95f3850
WN
2401 /*
2402 * Get the host data width - this assumes that HCON has been set with
2403 * the correct values.
2404 */
2405 i = (mci_readl(host, HCON) >> 7) & 0x7;
2406 if (!i) {
2407 host->push_data = dw_mci_push_data16;
2408 host->pull_data = dw_mci_pull_data16;
2409 width = 16;
2410 host->data_shift = 1;
2411 } else if (i == 2) {
2412 host->push_data = dw_mci_push_data64;
2413 host->pull_data = dw_mci_pull_data64;
2414 width = 64;
2415 host->data_shift = 3;
2416 } else {
2417 /* Check for a reserved value, and warn if it is */
2418 WARN((i != 1),
2419 "HCON reports a reserved host data width!\n"
2420 "Defaulting to 32-bit access.\n");
2421 host->push_data = dw_mci_push_data32;
2422 host->pull_data = dw_mci_pull_data32;
2423 width = 32;
2424 host->data_shift = 2;
2425 }
2426
2427 /* Reset all blocks */
31bff450 2428 if (!dw_mci_ctrl_all_reset(host))
141a712a
SJ
2429 return -ENODEV;
2430
2431 host->dma_ops = host->pdata->dma_ops;
2432 dw_mci_init_dma(host);
f95f3850
WN
2433
2434 /* Clear the interrupts for the host controller */
2435 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2436 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2437
2438 /* Put in max timeout */
2439 mci_writel(host, TMOUT, 0xFFFFFFFF);
2440
2441 /*
2442 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2443 * Tx Mark = fifo_size / 2 DMA Size = 8
2444 */
b86d8253
JH
2445 if (!host->pdata->fifo_depth) {
2446 /*
2447 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2448 * have been overwritten by the bootloader, just like we're
2449 * about to do, so if you know the value for your hardware, you
2450 * should put it in the platform data.
2451 */
2452 fifo_size = mci_readl(host, FIFOTH);
8234e869 2453 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
b86d8253
JH
2454 } else {
2455 fifo_size = host->pdata->fifo_depth;
2456 }
2457 host->fifo_depth = fifo_size;
52426899
SJ
2458 host->fifoth_val =
2459 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
e61cf118 2460 mci_writel(host, FIFOTH, host->fifoth_val);
f95f3850
WN
2461
2462 /* disable clock to CIU */
2463 mci_writel(host, CLKENA, 0);
2464 mci_writel(host, CLKSRC, 0);
2465
63008768
JH
2466 /*
2467 * In 2.40a spec, Data offset is changed.
2468 * Need to check the version-id and set data-offset for DATA register.
2469 */
2470 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2471 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2472
2473 if (host->verid < DW_MMC_240A)
2474 host->data_offset = DATA_OFFSET;
2475 else
2476 host->data_offset = DATA_240A_OFFSET;
2477
f95f3850 2478 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
95dcc2cb 2479 host->card_workqueue = alloc_workqueue("dw-mci-card",
59ff3eb6 2480 WQ_MEM_RECLAIM, 1);
ef7aef9a
WY
2481 if (!host->card_workqueue) {
2482 ret = -ENOMEM;
1791b13e 2483 goto err_dmaunmap;
ef7aef9a 2484 }
1791b13e 2485 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
780f22af
SJ
2486 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2487 host->irq_flags, "dw-mci", host);
f95f3850 2488 if (ret)
1791b13e 2489 goto err_workqueue;
f95f3850 2490
f95f3850
WN
2491 if (host->pdata->num_slots)
2492 host->num_slots = host->pdata->num_slots;
2493 else
2494 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2495
2da1d7f2
YC
2496 /*
2497 * Enable interrupts for command done, data over, data empty, card det,
2498 * receive ready and error such as transmit, receive timeout, crc error
2499 */
2500 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2501 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2502 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2503 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2504 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2505
2506 dev_info(host->dev, "DW MMC controller at irq %d, "
2507 "%d bit host data width, "
2508 "%u deep fifo\n",
2509 host->irq, width, fifo_size);
2510
f95f3850
WN
2511 /* We need at least one slot to succeed */
2512 for (i = 0; i < host->num_slots; i++) {
2513 ret = dw_mci_init_slot(host, i);
1c2215b7
TA
2514 if (ret)
2515 dev_dbg(host->dev, "slot %d init failed\n", i);
2516 else
2517 init_slots++;
2518 }
2519
2520 if (init_slots) {
2521 dev_info(host->dev, "%d slots initialized\n", init_slots);
2522 } else {
2523 dev_dbg(host->dev, "attempted to initialize %d slots, "
2524 "but failed on all\n", host->num_slots);
780f22af 2525 goto err_workqueue;
f95f3850
WN
2526 }
2527
f95f3850 2528 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4a90920c 2529 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
f95f3850
WN
2530
2531 return 0;
2532
1791b13e 2533err_workqueue:
95dcc2cb 2534 destroy_workqueue(host->card_workqueue);
1791b13e 2535
f95f3850
WN
2536err_dmaunmap:
2537 if (host->use_dma && host->dma_ops->exit)
2538 host->dma_ops->exit(host);
780f22af 2539 if (host->vmmc)
c07946a3 2540 regulator_disable(host->vmmc);
f90a0612
TA
2541
2542err_clk_ciu:
780f22af 2543 if (!IS_ERR(host->ciu_clk))
f90a0612 2544 clk_disable_unprepare(host->ciu_clk);
780f22af 2545
f90a0612 2546err_clk_biu:
780f22af 2547 if (!IS_ERR(host->biu_clk))
f90a0612 2548 clk_disable_unprepare(host->biu_clk);
780f22af 2549
f95f3850
WN
2550 return ret;
2551}
62ca8034 2552EXPORT_SYMBOL(dw_mci_probe);
f95f3850 2553
62ca8034 2554void dw_mci_remove(struct dw_mci *host)
f95f3850 2555{
f95f3850
WN
2556 int i;
2557
2558 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2559 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2560
f95f3850 2561 for (i = 0; i < host->num_slots; i++) {
4a90920c 2562 dev_dbg(host->dev, "remove slot %d\n", i);
f95f3850
WN
2563 if (host->slot[i])
2564 dw_mci_cleanup_slot(host->slot[i], i);
2565 }
2566
2567 /* disable clock to CIU */
2568 mci_writel(host, CLKENA, 0);
2569 mci_writel(host, CLKSRC, 0);
2570
95dcc2cb 2571 destroy_workqueue(host->card_workqueue);
f95f3850
WN
2572
2573 if (host->use_dma && host->dma_ops->exit)
2574 host->dma_ops->exit(host);
2575
780f22af 2576 if (host->vmmc)
c07946a3 2577 regulator_disable(host->vmmc);
c07946a3 2578
f90a0612
TA
2579 if (!IS_ERR(host->ciu_clk))
2580 clk_disable_unprepare(host->ciu_clk);
780f22af 2581
f90a0612
TA
2582 if (!IS_ERR(host->biu_clk))
2583 clk_disable_unprepare(host->biu_clk);
f95f3850 2584}
62ca8034
SH
2585EXPORT_SYMBOL(dw_mci_remove);
2586
2587
f95f3850 2588
6fe8890d 2589#ifdef CONFIG_PM_SLEEP
f95f3850
WN
2590/*
2591 * TODO: we should probably disable the clock to the card in the suspend path.
2592 */
62ca8034 2593int dw_mci_suspend(struct dw_mci *host)
f95f3850 2594{
c07946a3
JC
2595 if (host->vmmc)
2596 regulator_disable(host->vmmc);
2597
f95f3850
WN
2598 return 0;
2599}
62ca8034 2600EXPORT_SYMBOL(dw_mci_suspend);
f95f3850 2601
62ca8034 2602int dw_mci_resume(struct dw_mci *host)
f95f3850
WN
2603{
2604 int i, ret;
f95f3850 2605
f2f942ce
SK
2606 if (host->vmmc) {
2607 ret = regulator_enable(host->vmmc);
2608 if (ret) {
2609 dev_err(host->dev,
2610 "failed to enable regulator: %d\n", ret);
2611 return ret;
2612 }
2613 }
1d6c4e0a 2614
31bff450 2615 if (!dw_mci_ctrl_all_reset(host)) {
e61cf118
JC
2616 ret = -ENODEV;
2617 return ret;
2618 }
2619
3bfe619d 2620 if (host->use_dma && host->dma_ops->init)
141a712a
SJ
2621 host->dma_ops->init(host);
2622
52426899
SJ
2623 /*
2624 * Restore the initial value at FIFOTH register
2625 * And Invalidate the prev_blksz with zero
2626 */
e61cf118 2627 mci_writel(host, FIFOTH, host->fifoth_val);
52426899 2628 host->prev_blksz = 0;
e61cf118 2629
2eb2944f
DA
2630 /* Put in max timeout */
2631 mci_writel(host, TMOUT, 0xFFFFFFFF);
2632
e61cf118
JC
2633 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2634 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2635 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2636 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2637 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2638
f95f3850
WN
2639 for (i = 0; i < host->num_slots; i++) {
2640 struct dw_mci_slot *slot = host->slot[i];
2641 if (!slot)
2642 continue;
ab269128
AK
2643 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2644 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2645 dw_mci_setup_bus(slot, true);
2646 }
f95f3850 2647 }
f95f3850
WN
2648 return 0;
2649}
62ca8034 2650EXPORT_SYMBOL(dw_mci_resume);
6fe8890d
JC
2651#endif /* CONFIG_PM_SLEEP */
2652
f95f3850
WN
2653static int __init dw_mci_init(void)
2654{
8e1c4e4d 2655 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
62ca8034 2656 return 0;
f95f3850
WN
2657}
2658
2659static void __exit dw_mci_exit(void)
2660{
f95f3850
WN
2661}
2662
2663module_init(dw_mci_init);
2664module_exit(dw_mci_exit);
2665
2666MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2667MODULE_AUTHOR("NXP Semiconductor VietNam");
2668MODULE_AUTHOR("Imagination Technologies Ltd");
2669MODULE_LICENSE("GPL v2");