]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/mmc/host/dw_mmc.c
mmc: dw_mmc: remove unnecessary function.
[mirror_ubuntu-bionic-kernel.git] / drivers / mmc / host / dw_mmc.c
CommitLineData
f95f3850
WN
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
f95f3850
WN
25#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/mmc.h>
90c2143a 32#include <linux/mmc/sdio.h>
f95f3850
WN
33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
c07946a3 35#include <linux/regulator/consumer.h>
1791b13e 36#include <linux/workqueue.h>
c91eab4b 37#include <linux/of.h>
55a6ceb2 38#include <linux/of_gpio.h>
bf626e55 39#include <linux/mmc/slot-gpio.h>
f95f3850
WN
40
41#include "dw_mmc.h"
42
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* host->dir_status values: direction of the current data transfer */
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
/* Transfers smaller than this (bytes) are done in PIO, not DMA */
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */
57
#ifdef CONFIG_MMC_DW_IDMAC
/* All IDMAC interrupt-status bits, used to acknowledge everything at once */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

/*
 * In-memory descriptor layout consumed by the controller's internal DMA
 * engine (IDMAC), used in chained-descriptor mode (see IDMAC_DES0_CH).
 */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable completion interrupt for this desc */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of the transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of the transfer */
#define IDMAC_DES0_CH	BIT(4)	/* des3 is a link to the next descriptor */
#define IDMAC_DES0_ER	BIT(5)	/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* card error summary */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor owned by the DMA engine */

	u32		des1;	/* Buffer sizes */
/* Buffer 1 size lives in the low 13 bits of des1; other fields preserved */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address (or next-desc link) */
};
#endif /* CONFIG_MMC_DW_IDMAC */
83
0976f16d
SJ
/* Standard tuning block pattern for a 4-bit bus (CMD19/CMD21 compare data) */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
f95f3850 94
0976f16d
SJ
/* Standard tuning block pattern for an 8-bit bus (CMD21 compare data) */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
113
31bff450
SJ
114static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
116
f95f3850
WN
117#if defined(CONFIG_DEBUG_FS)
118static int dw_mci_req_show(struct seq_file *s, void *v)
119{
120 struct dw_mci_slot *slot = s->private;
121 struct mmc_request *mrq;
122 struct mmc_command *cmd;
123 struct mmc_command *stop;
124 struct mmc_data *data;
125
126 /* Make sure we get a consistent snapshot */
127 spin_lock_bh(&slot->host->lock);
128 mrq = slot->mrq;
129
130 if (mrq) {
131 cmd = mrq->cmd;
132 data = mrq->data;
133 stop = mrq->stop;
134
135 if (cmd)
136 seq_printf(s,
137 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 cmd->opcode, cmd->arg, cmd->flags,
139 cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 cmd->resp[2], cmd->error);
141 if (data)
142 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 data->bytes_xfered, data->blocks,
144 data->blksz, data->flags, data->error);
145 if (stop)
146 seq_printf(s,
147 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 stop->opcode, stop->arg, stop->flags,
149 stop->resp[0], stop->resp[1], stop->resp[2],
150 stop->resp[2], stop->error);
151 }
152
153 spin_unlock_bh(&slot->host->lock);
154
155 return 0;
156}
157
/* debugfs open hook: bind dw_mci_req_show to the slot stored in i_private. */
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}
162
/* File operations for the per-slot debugfs "req" file (seq_file based). */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
170
171static int dw_mci_regs_show(struct seq_file *s, void *v)
172{
173 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
174 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
175 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
176 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
177 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
178 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
179
180 return 0;
181}
182
/* debugfs open hook: bind dw_mci_regs_show to the host stored in i_private. */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}
187
/* File operations for the debugfs "regs" file (seq_file based). */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
195
196static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
197{
198 struct mmc_host *mmc = slot->mmc;
199 struct dw_mci *host = slot->host;
200 struct dentry *root;
201 struct dentry *node;
202
203 root = mmc->debugfs_root;
204 if (!root)
205 return;
206
207 node = debugfs_create_file("regs", S_IRUSR, root, host,
208 &dw_mci_regs_fops);
209 if (!node)
210 goto err;
211
212 node = debugfs_create_file("req", S_IRUSR, root, slot,
213 &dw_mci_req_fops);
214 if (!node)
215 goto err;
216
217 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
218 if (!node)
219 goto err;
220
221 node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 (u32 *)&host->pending_events);
223 if (!node)
224 goto err;
225
226 node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 (u32 *)&host->completed_events);
228 if (!node)
229 goto err;
230
231 return;
232
233err:
234 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
235}
236#endif /* defined(CONFIG_DEBUG_FS) */
237
f95f3850
WN
238static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
239{
240 struct mmc_data *data;
800d78bf 241 struct dw_mci_slot *slot = mmc_priv(mmc);
e95baf13 242 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
f95f3850
WN
243 u32 cmdr;
244 cmd->error = -EINPROGRESS;
245
246 cmdr = cmd->opcode;
247
90c2143a
SJ
248 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
249 cmd->opcode == MMC_GO_IDLE_STATE ||
250 cmd->opcode == MMC_GO_INACTIVE_STATE ||
251 (cmd->opcode == SD_IO_RW_DIRECT &&
252 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
f95f3850 253 cmdr |= SDMMC_CMD_STOP;
4a1b27ad
JC
254 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
255 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
f95f3850
WN
256
257 if (cmd->flags & MMC_RSP_PRESENT) {
258 /* We expect a response, so set this bit */
259 cmdr |= SDMMC_CMD_RESP_EXP;
260 if (cmd->flags & MMC_RSP_136)
261 cmdr |= SDMMC_CMD_RESP_LONG;
262 }
263
264 if (cmd->flags & MMC_RSP_CRC)
265 cmdr |= SDMMC_CMD_RESP_CRC;
266
267 data = cmd->data;
268 if (data) {
269 cmdr |= SDMMC_CMD_DAT_EXP;
270 if (data->flags & MMC_DATA_STREAM)
271 cmdr |= SDMMC_CMD_STRM_MODE;
272 if (data->flags & MMC_DATA_WRITE)
273 cmdr |= SDMMC_CMD_DAT_WR;
274 }
275
cb27a843
JH
276 if (drv_data && drv_data->prepare_command)
277 drv_data->prepare_command(slot->host, &cmdr);
800d78bf 278
f95f3850
WN
279 return cmdr;
280}
281
90c2143a
SJ
282static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
283{
284 struct mmc_command *stop;
285 u32 cmdr;
286
287 if (!cmd->data)
288 return 0;
289
290 stop = &host->stop_abort;
291 cmdr = cmd->opcode;
292 memset(stop, 0, sizeof(struct mmc_command));
293
294 if (cmdr == MMC_READ_SINGLE_BLOCK ||
295 cmdr == MMC_READ_MULTIPLE_BLOCK ||
296 cmdr == MMC_WRITE_BLOCK ||
297 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
298 stop->opcode = MMC_STOP_TRANSMISSION;
299 stop->arg = 0;
300 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
301 } else if (cmdr == SD_IO_RW_EXTENDED) {
302 stop->opcode = SD_IO_RW_DIRECT;
303 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
304 ((cmd->arg >> 28) & 0x7);
305 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
306 } else {
307 return 0;
308 }
309
310 cmdr = stop->opcode | SDMMC_CMD_STOP |
311 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
312
313 return cmdr;
314}
315
f95f3850
WN
/*
 * Issue a command to the controller: latch it as the in-flight command,
 * program the argument register, then set the start bit in CMD.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();	/* make sure CMDARG is written before the start bit is set */

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
329
90c2143a 330static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
f95f3850 331{
90c2143a
SJ
332 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
333 dw_mci_start_command(host, stop, host->stop_cmdr);
f95f3850
WN
334}
335
/* DMA interface functions */

/*
 * Abort any in-flight DMA and mark the transfer phase as complete so the
 * state machine (tasklet) can move on.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
347
9aa51408
SJ
348static int dw_mci_get_dma_dir(struct mmc_data *data)
349{
350 if (data->flags & MMC_DATA_WRITE)
351 return DMA_TO_DEVICE;
352 else
353 return DMA_FROM_DEVICE;
354}
355
9beee912 356#ifdef CONFIG_MMC_DW_IDMAC
f95f3850
WN
357static void dw_mci_dma_cleanup(struct dw_mci *host)
358{
359 struct mmc_data *data = host->data;
360
361 if (data)
9aa51408 362 if (!data->host_cookie)
4a90920c 363 dma_unmap_sg(host->dev,
9aa51408
SJ
364 data->sg,
365 data->sg_len,
366 dw_mci_get_dma_dir(data));
f95f3850
WN
367}
368
5ce9d961
SJ
/* Trigger a software reset of the internal DMA controller via BMOD. */
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}
376
f95f3850
WN
/*
 * Stop the internal DMA controller: detach it from the host interface
 * (CTRL), then halt and soft-reset the engine itself (BMOD).
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
393
/*
 * IDMAC transfer-complete handler: clean up the mapping and, if the
 * transfer is still live, signal completion to the state machine.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
411
/*
 * Fill the IDMAC descriptor ring (host->sg_cpu) from the DMA-mapped
 * scatterlist, then mark the first/last descriptors and publish the ring
 * with a write barrier before the engine is started.
 *
 * NOTE(review): assumes sg_len >= 1 — with sg_len == 0 the last-descriptor
 * computation below would index before the ring; callers appear to
 * guarantee a non-empty mapping (dw_mci_pre_dma_transfer rejects
 * sg_len == 0) — confirm.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor (sg_cpu is byte-addressed, hence the sizeof) */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();	/* descriptors must be visible before the engine polls them */
}
443
/*
 * Kick off an IDMAC transfer: build the descriptor ring, route the host
 * through the IDMAC, enable the engine and poke the poll-demand register.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();	/* CTRL update must land before the engine is enabled */

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
465
/*
 * One-time IDMAC setup: link the descriptor page into a circular ring,
 * reset the engine, program interrupt masking and the ring base address.
 * Always returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
493
8e2b36ea 494static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
885c3e80
SJ
495 .init = dw_mci_idmac_init,
496 .start = dw_mci_idmac_start_dma,
497 .stop = dw_mci_idmac_stop_dma,
498 .complete = dw_mci_idmac_complete_dma,
499 .cleanup = dw_mci_dma_cleanup,
500};
501#endif /* CONFIG_MMC_DW_IDMAC */
502
9aa51408
SJ
503static int dw_mci_pre_dma_transfer(struct dw_mci *host,
504 struct mmc_data *data,
505 bool next)
f95f3850
WN
506{
507 struct scatterlist *sg;
9aa51408 508 unsigned int i, sg_len;
03e8cb53 509
9aa51408
SJ
510 if (!next && data->host_cookie)
511 return data->host_cookie;
f95f3850
WN
512
513 /*
514 * We don't do DMA on "complex" transfers, i.e. with
515 * non-word-aligned buffers or lengths. Also, we don't bother
516 * with all the DMA setup overhead for short transfers.
517 */
518 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
519 return -EINVAL;
9aa51408 520
f95f3850
WN
521 if (data->blksz & 3)
522 return -EINVAL;
523
524 for_each_sg(data->sg, sg, data->sg_len, i) {
525 if (sg->offset & 3 || sg->length & 3)
526 return -EINVAL;
527 }
528
4a90920c 529 sg_len = dma_map_sg(host->dev,
9aa51408
SJ
530 data->sg,
531 data->sg_len,
532 dw_mci_get_dma_dir(data));
533 if (sg_len == 0)
534 return -EINVAL;
03e8cb53 535
9aa51408
SJ
536 if (next)
537 data->host_cookie = sg_len;
f95f3850 538
9aa51408
SJ
539 return sg_len;
540}
541
9aa51408
SJ
542static void dw_mci_pre_req(struct mmc_host *mmc,
543 struct mmc_request *mrq,
544 bool is_first_req)
545{
546 struct dw_mci_slot *slot = mmc_priv(mmc);
547 struct mmc_data *data = mrq->data;
548
549 if (!slot->host->use_dma || !data)
550 return;
551
552 if (data->host_cookie) {
553 data->host_cookie = 0;
554 return;
555 }
556
557 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
558 data->host_cookie = 0;
559}
560
561static void dw_mci_post_req(struct mmc_host *mmc,
562 struct mmc_request *mrq,
563 int err)
564{
565 struct dw_mci_slot *slot = mmc_priv(mmc);
566 struct mmc_data *data = mrq->data;
567
568 if (!slot->host->use_dma || !data)
569 return;
570
571 if (data->host_cookie)
4a90920c 572 dma_unmap_sg(slot->host->dev,
9aa51408
SJ
573 data->sg,
574 data->sg_len,
575 dw_mci_get_dma_dir(data));
576 data->host_cookie = 0;
577}
578
52426899
SJ
/*
 * Program FIFOTH (burst size and RX/TX watermarks) to suit the transfer's
 * block size.  Only meaningful for the internal DMA controller.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	unsigned int blksz = data->blksz;
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width;
	u32 tx_wmark = host->fifo_depth / 2;
	u32 tx_wmark_invers = host->fifo_depth - tx_wmark;
	/* Defaults (MSIZE = 1 transfer, RX watermark = 1) */
	u32 msize = 0;
	u32 rx_wmark = 1;

	/*
	 * A block size that is not a multiple of the FIFO width must use
	 * the single-transfer defaults; otherwise pick the largest burst
	 * size that evenly divides both the block depth and the TX space.
	 */
	if (!(blksz % fifo_width)) {
		int idx;

		for (idx = (sizeof(mszs) / sizeof(mszs[0])) - 1; idx > 0; idx--) {
			if (!(blksz_depth % mszs[idx]) &&
			    !(tx_wmark_invers % mszs[idx])) {
				msize = idx;
				rx_wmark = mszs[idx] - 1;
				break;
			}
		}
		/* idx == 0 is never tried; the defaults then apply. */
	}

	mci_writel(host, FIFOTH, SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark));
#endif
}
619
f1d2736c
SJ
/*
 * Configure the card-read threshold (CDTHRCTL) for fast timings.
 * Enabled only for HS200/SDR104 and only when a whole block fits in the
 * FIFO; otherwise the threshold is disabled.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	/* The whole block must fit in the FIFO for the threshold to help. */
	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
650
9aa51408
SJ
/*
 * Try to submit a data transfer via DMA.
 *
 * Returns 0 on success (DMA started, host->using_dma set) or a negative
 * error when the transfer must fall back to PIO.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		/* Keep the DMA engine quiescent before the PIO fallback. */
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
697
/*
 * Submit a data transfer, preferring DMA and falling back to PIO.
 * In the PIO path the sg_miter is set up and RX/TX interrupts are enabled;
 * in the DMA path only the block-size bookkeeping for FIFOTH is updated.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);	/* a previous transfer must be finished */
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		/* Clear stale FIFO events, then enable RX/TX interrupts. */
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
752
753static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
754{
755 struct dw_mci *host = slot->host;
756 unsigned long timeout = jiffies + msecs_to_jiffies(500);
757 unsigned int cmd_status = 0;
758
759 mci_writel(host, CMDARG, arg);
760 wmb();
761 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
762
763 while (time_before(jiffies, timeout)) {
764 cmd_status = mci_readl(host, CMD);
765 if (!(cmd_status & SDMMC_CMD_START))
766 return;
767 }
768 dev_err(&slot->mmc->class_dev,
769 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
770 cmd, arg, cmd_status);
771}
772
ab269128 773static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
f95f3850
WN
774{
775 struct dw_mci *host = slot->host;
fdf492a1 776 unsigned int clock = slot->clock;
f95f3850 777 u32 div;
9623b5b9 778 u32 clk_en_a;
f95f3850 779
fdf492a1
DA
780 if (!clock) {
781 mci_writel(host, CLKENA, 0);
782 mci_send_cmd(slot,
783 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
784 } else if (clock != host->current_speed || force_clkinit) {
785 div = host->bus_hz / clock;
786 if (host->bus_hz % clock && host->bus_hz > clock)
f95f3850
WN
787 /*
788 * move the + 1 after the divide to prevent
789 * over-clocking the card.
790 */
e419990b
SJ
791 div += 1;
792
fdf492a1 793 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
f95f3850 794
fdf492a1
DA
795 if ((clock << div) != slot->__clk_old || force_clkinit)
796 dev_info(&slot->mmc->class_dev,
797 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
798 slot->id, host->bus_hz, clock,
799 div ? ((host->bus_hz / div) >> 1) :
800 host->bus_hz, div);
f95f3850
WN
801
802 /* disable clock */
803 mci_writel(host, CLKENA, 0);
804 mci_writel(host, CLKSRC, 0);
805
806 /* inform CIU */
807 mci_send_cmd(slot,
808 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
809
810 /* set clock to desired speed */
811 mci_writel(host, CLKDIV, div);
812
813 /* inform CIU */
814 mci_send_cmd(slot,
815 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
816
9623b5b9
DA
817 /* enable clock; only low power if no SDIO */
818 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
819 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
820 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
821 mci_writel(host, CLKENA, clk_en_a);
f95f3850
WN
822
823 /* inform CIU */
824 mci_send_cmd(slot,
825 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
826
fdf492a1
DA
827 /* keep the clock with reflecting clock dividor */
828 slot->__clk_old = clock << div;
f95f3850
WN
829 }
830
fdf492a1
DA
831 host->current_speed = clock;
832
f95f3850 833 /* Set the current slot bus width */
1d56c453 834 mci_writel(host, CTYPE, (slot->ctype << slot->id));
f95f3850
WN
835}
836
053b3ce6
SJ
/*
 * Begin executing @cmd for @slot: reset per-request host state, program
 * the data registers when there is a payload, and issue the command.
 * Also pre-computes the stop/abort command flags for later use.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	/* Reset per-request bookkeeping. */
	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		/* Maximum hardware data timeout. */
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();	/* data setup must complete before the command starts */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	/* Prepare the stop command: the request's own, or a built abort. */
	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
881
053b3ce6
SJ
882static void dw_mci_start_request(struct dw_mci *host,
883 struct dw_mci_slot *slot)
884{
885 struct mmc_request *mrq = slot->mrq;
886 struct mmc_command *cmd;
887
888 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
889 __dw_mci_start_request(host, slot, cmd);
890}
891
7456caae 892/* must be called with host->lock held */
f95f3850
WN
893static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
894 struct mmc_request *mrq)
895{
896 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
897 host->state);
898
f95f3850
WN
899 slot->mrq = mrq;
900
901 if (host->state == STATE_IDLE) {
902 host->state = STATE_SENDING_CMD;
903 dw_mci_start_request(host, slot);
904 } else {
905 list_add_tail(&slot->queue_node, &host->queue);
906 }
f95f3850
WN
907}
908
/*
 * mmc_host_ops.request: entry point from the MMC core.  Fails the request
 * immediately with -ENOMEDIUM when no card is present, otherwise queues it
 * (or starts it right away when the controller is idle).
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);	/* one request per slot at a time */

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}
934
/*
 * mmc_host_ops.set_ios: apply bus width, timing (DDR bit in UHS_REG),
 * clock and slot power changes requested by the MMC core.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set: per-slot DDR bit lives at bit (16 + slot id) */
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Platform-specific hook. */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}
992
993static int dw_mci_get_ro(struct mmc_host *mmc)
994{
995 int read_only;
996 struct dw_mci_slot *slot = mmc_priv(mmc);
f95f3850
WN
997
998 /* Use platform get_ro function, else try on board write protect */
9640639b 999 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
b4967aa5 1000 read_only = 0;
55a6ceb2
DA
1001 else if (gpio_is_valid(slot->wp_gpio))
1002 read_only = gpio_get_value(slot->wp_gpio);
f95f3850
WN
1003 else
1004 read_only =
1005 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1006
1007 dev_dbg(&mmc->class_dev, "card is %s\n",
1008 read_only ? "read-only" : "read-write");
1009
1010 return read_only;
1011}
1012
/*
 * mmc_host_ops.get_cd: report card presence — quirk override first, then a
 * CD GPIO if available, else the controller's CDETECT register (active
 * low).  Also keeps the slot's DW_MMC_CARD_PRESENT flag in sync, under
 * host->lock.  Returns 1 when a card is present.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}
1042
9623b5b9
DA
/*
 * Disable lower power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		/* The CIU must be told about the CLKENA change. */
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}
1066
/*
 * mmc_host_ops::enable_sdio_irq callback.
 *
 * Enables or disables the per-slot SDIO interrupt bit in INTMASK.
 * @enb: non-zero to enable, zero to disable.
 */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled. This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}
1091
0976f16d
SJ
1092static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1093{
1094 struct dw_mci_slot *slot = mmc_priv(mmc);
1095 struct dw_mci *host = slot->host;
1096 const struct dw_mci_drv_data *drv_data = host->drv_data;
1097 struct dw_mci_tuning_data tuning_data;
1098 int err = -ENOSYS;
1099
1100 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1101 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1102 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1103 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1104 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1105 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1106 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1107 } else {
1108 return -EINVAL;
1109 }
1110 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1111 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1112 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1113 } else {
1114 dev_err(host->dev,
1115 "Undefined command(%d) for tuning\n", opcode);
1116 return -EINVAL;
1117 }
1118
1119 if (drv_data && drv_data->execute_tuning)
1120 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1121 return err;
1122}
1123
/* Host controller operations handed to the MMC core at slot init. */
static const struct mmc_host_ops dw_mci_ops = {
	.request = dw_mci_request,
	.pre_req = dw_mci_pre_req,
	.post_req = dw_mci_post_req,
	.set_ios = dw_mci_set_ios,
	.get_ro = dw_mci_get_ro,
	.get_cd = dw_mci_get_cd,
	.enable_sdio_irq = dw_mci_enable_sdio_irq,
	.execute_tuning = dw_mci_execute_tuning,
};
1134
/*
 * Finish the current request and start the next queued one, if any.
 *
 * Called with host->lock held; temporarily drops it around
 * mmc_request_done() (which may re-enter the driver) — hence the
 * __releases/__acquires annotations.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot is waiting: dequeue it and kick off its request */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	/* Drop the lock: mmc_request_done may call back into this driver */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1163
/*
 * Consume the latched command status, read the card's response out of
 * the RESP registers and translate hardware error bits into errno
 * values on cmd->error.
 *
 * Returns cmd->error (0 on success).
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* RESP0 holds the least-significant word, so the
			 * 136-bit response is filled in reverse order */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Map status bits to errno; response timeout takes precedence */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}
1202
/*
 * Evaluate the latched data-phase status and fill in data->error and
 * data->bytes_xfered accordingly. On any data error the FIFO is reset
 * to flush stale bytes.
 *
 * Returns data->error (0 on success).
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			/* End-bit error: meaning depends on direction */
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_err(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_fifo_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
1245
/*
 * Bottom-half state machine driving a request through its phases:
 * IDLE -> SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP,
 * with DATA_ERROR as a detour. Events are posted by the IRQ handler
 * via host->pending_events; the loop advances until no transition
 * fires (state == prev_state). The switch fallthroughs are deliberate:
 * a phase that completes immediately falls into the next case.
 *
 * Runs with host->lock held (taken here, released at "unlock").
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now issue the real data command */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/* Command failed but data was set up: abort it */
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_fifo_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1382
/* push final bytes to part_buf, only use during push */
/* Stash @cnt trailing bytes that don't fill a whole FIFO word. */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}
f95f3850 1389
/* append bytes to part_buf, only use during push */
/* Returns the number of bytes actually consumed from @buf (may be
 * less than @cnt if part_buf fills a FIFO word first). */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	/* cap at the space left in one FIFO word (1 << data_shift bytes) */
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}
f95f3850 1398
/* pull first bytes from part_buf, only use during pull */
/* Returns the number of bytes copied into @buf (0 if part_buf empty). */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}
1411
/* pull final bytes from the part_buf, assuming it's just been filled */
/* Copies the first @cnt bytes to @buf and marks the rest of the FIFO
 * word as remaining in part_buf for the next pull. */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
f95f3850 1419
/*
 * PIO push for a 16-bit FIFO: write @cnt bytes from @buf, buffering
 * any partial word in host->part_buf16. On unaligned @buf (when the
 * arch lacks efficient unaligned access) data is staged through a
 * small aligned bounce buffer.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
f95f3850 1470
/*
 * PIO pull for a 16-bit FIFO: read @cnt bytes into @buf, with an
 * aligned bounce buffer for unaligned destinations and host->part_buf16
 * holding any trailing partial word.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* odd trailing byte: read a full word, keep the rest */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1502
/*
 * PIO push for a 32-bit FIFO; same structure as dw_mci_push_data16
 * but with 4-byte FIFO words and host->part_buf32.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
1553
/*
 * PIO pull for a 32-bit FIFO; same structure as dw_mci_pull_data16
 * but with 4-byte FIFO words and host->part_buf32.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* trailing partial word: read it all, keep the remainder */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1585
/*
 * PIO push for a 64-bit FIFO; same structure as the 16/32-bit variants
 * but with 8-byte FIFO words, buffering the partial word directly in
 * host->part_buf.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
1637
/*
 * PIO pull for a 64-bit FIFO; same structure as the 16/32-bit variants
 * with 8-byte FIFO words and host->part_buf for the trailing fragment.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* trailing partial word: read it all, keep the remainder */
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
f95f3850 1669
34b664a2
JH
1670static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1671{
1672 int len;
f95f3850 1673
34b664a2
JH
1674 /* get remaining partial bytes */
1675 len = dw_mci_pull_part_bytes(host, buf, cnt);
1676 if (unlikely(len == cnt))
1677 return;
1678 buf += len;
1679 cnt -= len;
1680
1681 /* get the rest of the data */
1682 host->pull_data(host, buf, cnt);
f95f3850
WN
1683}
1684
/*
 * PIO receive path: drain the FIFO into the request's scatterlist via
 * the sg mapping iterator. Loops while the RXDR (receive-data-request)
 * interrupt stays asserted, or — when @dto indicates a data-over
 * interrupt — while the FIFO still holds data.
 *
 * When the whole scatterlist is consumed, posts EVENT_XFER_COMPLETE
 * for the tasklet.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes available = FIFO count scaled to bytes,
			 * plus any buffered partial word */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* publish the completed transfer before setting the event bit */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1738
/*
 * PIO transmit path: feed the FIFO from the request's scatterlist via
 * the sg mapping iterator, looping while the TXDR (transmit-data-
 * request) interrupt stays asserted. Mirrors dw_mci_read_data_pio.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free space = (depth - fill level) in bytes,
			 * minus bytes already staged in part_buf */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* publish the completed transfer before setting the event bit */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1792
/*
 * Latch the command-done status (unless an error status was already
 * recorded), post EVENT_CMD_COMPLETE and kick the tasklet.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* make cmd_status visible before the event bit */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1803
/*
 * Top-half interrupt handler. Reads the masked interrupt status,
 * acknowledges each pending source in RINTSTS, latches status for the
 * tasklet, and services PIO FIFO thresholds inline. DMA completion
 * (IDMAC) is handled separately at the bottom via IDSTS.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* drain whatever is left in the FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			/* card insert/remove: defer to the card workqueue */
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
1897
/*
 * Card-detect workqueue handler. For each slot, compares the current
 * card presence with the last recorded state; on a change it fails any
 * in-flight or queued request with -ENOMEDIUM (appropriate to the
 * request's current phase), resets the FIFO/IDMAC on removal, and
 * finally notifies the MMC core via mmc_detect_change().
 *
 * The while loop re-reads presence after each pass to catch a card
 * that was swapped while we were cleaning up.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* request is actively running: fail it
					 * according to how far it progressed */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* request is still queued: fail it and
					 * complete outside the lock */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				/* Clear down the FIFO */
				dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
				dw_mci_idmac_reset(host);
#endif

			}

			spin_unlock_bh(&host->lock);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
1983
#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
/* Walks the controller node's children and matches each child's "reg"
 * property against @slot. Returns NULL if nothing matches or the
 * device has no OF node. */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		/* require at least one 32-bit cell in "reg" */
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}
2004
/* Mapping of device-tree quirk property names to driver quirk flags. */
static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};

/* Collect the quirk flags declared in the slot's device-tree node.
 * Returns a bitmask of DW_MCI_SLOT_QUIRK_* values (0 if none). */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
			quirks |= of_slot_quirks[idx].id;

	return quirks;
}
2028
/* find the write protect gpio for a given slot; or -1 if none specified */
/* NOTE(review): on failure this actually returns -EINVAL, not -1,
 * which gpio_is_valid() treats the same way at the call site. */
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int gpio;

	if (!np)
		return -EINVAL;

	gpio = of_get_named_gpio(np, "wp-gpios", 0);

	/* Having a missing entry is valid; return silently */
	if (!gpio_is_valid(gpio))
		return -EINVAL;

	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
		dev_warn(dev, "gpio [%d] request failed\n", gpio);
		return -EINVAL;
	}

	return gpio;
}
/* find the cd gpio for a given slot */
/* Registers the "cd-gpios" pin from the slot's DT node with the MMC
 * slot-gpio helpers; silently does nothing if absent or invalid. */
static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
					struct mmc_host *mmc)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int gpio;

	if (!np)
		return;

	gpio = of_get_named_gpio(np, "cd-gpios", 0);

	/* Having a missing entry is valid; return silently */
	if (!gpio_is_valid(gpio))
		return;

	if (mmc_gpio_request_cd(mmc, gpio, 0))
		dev_warn(dev, "gpio [%d] request failed\n", gpio);
}
#else /* CONFIG_OF */
/* Stubs used when the kernel is built without device-tree support:
 * no quirks, no slot node, and no WP/CD GPIOs. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	return -EINVAL;
}
static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
					struct mmc_host *mmc)
{
	return;
}
#endif /* CONFIG_OF */
2090
36c179a9 2091static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
f95f3850
WN
2092{
2093 struct mmc_host *mmc;
2094 struct dw_mci_slot *slot;
e95baf13 2095 const struct dw_mci_drv_data *drv_data = host->drv_data;
800d78bf 2096 int ctrl_id, ret;
1f44a2a5 2097 u32 freq[2];
f95f3850 2098
4a90920c 2099 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
f95f3850
WN
2100 if (!mmc)
2101 return -ENOMEM;
2102
2103 slot = mmc_priv(mmc);
2104 slot->id = id;
2105 slot->mmc = mmc;
2106 slot->host = host;
c91eab4b 2107 host->slot[id] = slot;
f95f3850 2108
a70aaa64
DA
2109 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2110
f95f3850 2111 mmc->ops = &dw_mci_ops;
1f44a2a5
SJ
2112 if (of_property_read_u32_array(host->dev->of_node,
2113 "clock-freq-min-max", freq, 2)) {
2114 mmc->f_min = DW_MCI_FREQ_MIN;
2115 mmc->f_max = DW_MCI_FREQ_MAX;
2116 } else {
2117 mmc->f_min = freq[0];
2118 mmc->f_max = freq[1];
2119 }
f95f3850 2120
907abd51 2121 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
f95f3850 2122
fc3d7720
JC
2123 if (host->pdata->caps)
2124 mmc->caps = host->pdata->caps;
fc3d7720 2125
ab269128
AK
2126 if (host->pdata->pm_caps)
2127 mmc->pm_caps = host->pdata->pm_caps;
2128
800d78bf
TA
2129 if (host->dev->of_node) {
2130 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2131 if (ctrl_id < 0)
2132 ctrl_id = 0;
2133 } else {
2134 ctrl_id = to_platform_device(host->dev)->id;
2135 }
cb27a843
JH
2136 if (drv_data && drv_data->caps)
2137 mmc->caps |= drv_data->caps[ctrl_id];
800d78bf 2138
4f408cc6
SJ
2139 if (host->pdata->caps2)
2140 mmc->caps2 = host->pdata->caps2;
4f408cc6 2141
d8a4fb0e 2142 mmc_of_parse(mmc);
f95f3850 2143
f95f3850
WN
2144 if (host->pdata->blk_settings) {
2145 mmc->max_segs = host->pdata->blk_settings->max_segs;
2146 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2147 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2148 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2149 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2150 } else {
2151 /* Useful defaults if platform data is unset. */
a39e5746
JC
2152#ifdef CONFIG_MMC_DW_IDMAC
2153 mmc->max_segs = host->ring_size;
2154 mmc->max_blk_size = 65536;
2155 mmc->max_blk_count = host->ring_size;
2156 mmc->max_seg_size = 0x1000;
2157 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2158#else
f95f3850
WN
2159 mmc->max_segs = 64;
2160 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2161 mmc->max_blk_count = 512;
2162 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2163 mmc->max_seg_size = mmc->max_req_size;
f95f3850 2164#endif /* CONFIG_MMC_DW_IDMAC */
a39e5746 2165 }
f95f3850 2166
55a6ceb2 2167 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
bf626e55 2168 dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
55a6ceb2 2169
0cea529d
JC
2170 ret = mmc_add_host(mmc);
2171 if (ret)
2172 goto err_setup_bus;
f95f3850
WN
2173
2174#if defined(CONFIG_DEBUG_FS)
2175 dw_mci_init_debugfs(slot);
2176#endif
2177
2178 /* Card initially undetected */
2179 slot->last_detect_state = 0;
2180
2181 return 0;
800d78bf
TA
2182
2183err_setup_bus:
2184 mmc_free_host(mmc);
2185 return -EINVAL;
f95f3850
WN
2186}
2187
2188static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2189{
f95f3850
WN
2190 /* Debugfs stuff is cleaned up by mmc core */
2191 mmc_remove_host(slot->mmc);
2192 slot->host->slot[id] = NULL;
2193 mmc_free_host(slot->mmc);
2194}
2195
2196static void dw_mci_init_dma(struct dw_mci *host)
2197{
2198 /* Alloc memory for sg translation */
780f22af 2199 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
f95f3850
WN
2200 &host->sg_dma, GFP_KERNEL);
2201 if (!host->sg_cpu) {
4a90920c 2202 dev_err(host->dev, "%s: could not alloc DMA memory\n",
f95f3850
WN
2203 __func__);
2204 goto no_dma;
2205 }
2206
2207 /* Determine which DMA interface to use */
2208#ifdef CONFIG_MMC_DW_IDMAC
2209 host->dma_ops = &dw_mci_idmac_ops;
00956ea3 2210 dev_info(host->dev, "Using internal DMA controller.\n");
f95f3850
WN
2211#endif
2212
2213 if (!host->dma_ops)
2214 goto no_dma;
2215
e1631f98
JC
2216 if (host->dma_ops->init && host->dma_ops->start &&
2217 host->dma_ops->stop && host->dma_ops->cleanup) {
f95f3850 2218 if (host->dma_ops->init(host)) {
4a90920c 2219 dev_err(host->dev, "%s: Unable to initialize "
f95f3850
WN
2220 "DMA Controller.\n", __func__);
2221 goto no_dma;
2222 }
2223 } else {
4a90920c 2224 dev_err(host->dev, "DMA initialization not found.\n");
f95f3850
WN
2225 goto no_dma;
2226 }
2227
2228 host->use_dma = 1;
2229 return;
2230
2231no_dma:
4a90920c 2232 dev_info(host->dev, "Using PIO mode.\n");
f95f3850
WN
2233 host->use_dma = 0;
2234 return;
2235}
2236
31bff450 2237static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
f95f3850
WN
2238{
2239 unsigned long timeout = jiffies + msecs_to_jiffies(500);
31bff450 2240 u32 ctrl;
f95f3850 2241
31bff450
SJ
2242 ctrl = mci_readl(host, CTRL);
2243 ctrl |= reset;
2244 mci_writel(host, CTRL, ctrl);
f95f3850
WN
2245
2246 /* wait till resets clear */
2247 do {
2248 ctrl = mci_readl(host, CTRL);
31bff450 2249 if (!(ctrl & reset))
f95f3850
WN
2250 return true;
2251 } while (time_before(jiffies, timeout));
2252
31bff450
SJ
2253 dev_err(host->dev,
2254 "Timeout resetting block (ctrl reset %#x)\n",
2255 ctrl & reset);
f95f3850
WN
2256
2257 return false;
2258}
2259
31bff450
SJ
2260static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2261{
2262 /*
2263 * Reseting generates a block interrupt, hence setting
2264 * the scatter-gather pointer to NULL.
2265 */
2266 if (host->sg) {
2267 sg_miter_stop(&host->sg_miter);
2268 host->sg = NULL;
2269 }
2270
2271 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2272}
2273
2274static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2275{
2276 return dw_mci_ctrl_reset(host,
2277 SDMMC_CTRL_FIFO_RESET |
2278 SDMMC_CTRL_RESET |
2279 SDMMC_CTRL_DMA_RESET);
2280}
2281
#ifdef CONFIG_OF
/* Maps device-tree boolean properties onto driver quirk flag bits. */
static struct dw_mci_of_quirks {
	char *quirk;	/* device-tree property name */
	int id;		/* corresponding DW_MCI_QUIRK_* flag */
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};
2292
2293static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2294{
2295 struct dw_mci_board *pdata;
2296 struct device *dev = host->dev;
2297 struct device_node *np = dev->of_node;
e95baf13 2298 const struct dw_mci_drv_data *drv_data = host->drv_data;
800d78bf 2299 int idx, ret;
3c6d89ea 2300 u32 clock_frequency;
c91eab4b
TA
2301
2302 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2303 if (!pdata) {
2304 dev_err(dev, "could not allocate memory for pdata\n");
2305 return ERR_PTR(-ENOMEM);
2306 }
2307
2308 /* find out number of slots supported */
2309 if (of_property_read_u32(dev->of_node, "num-slots",
2310 &pdata->num_slots)) {
2311 dev_info(dev, "num-slots property not found, "
2312 "assuming 1 slot is available\n");
2313 pdata->num_slots = 1;
2314 }
2315
2316 /* get quirks */
2317 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2318 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2319 pdata->quirks |= of_quirks[idx].id;
2320
2321 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2322 dev_info(dev, "fifo-depth property not found, using "
2323 "value of FIFOTH register as default\n");
2324
2325 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2326
3c6d89ea
DA
2327 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2328 pdata->bus_hz = clock_frequency;
2329
cb27a843
JH
2330 if (drv_data && drv_data->parse_dt) {
2331 ret = drv_data->parse_dt(host);
800d78bf
TA
2332 if (ret)
2333 return ERR_PTR(ret);
2334 }
2335
10b49841
SJ
2336 if (of_find_property(np, "supports-highspeed", NULL))
2337 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2338
c91eab4b
TA
2339 return pdata;
2340}
2341
#else /* CONFIG_OF */
/* Without DT there is nothing to parse; the caller must supply pdata. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
2348
62ca8034 2349int dw_mci_probe(struct dw_mci *host)
f95f3850 2350{
e95baf13 2351 const struct dw_mci_drv_data *drv_data = host->drv_data;
62ca8034 2352 int width, i, ret = 0;
f95f3850 2353 u32 fifo_size;
1c2215b7 2354 int init_slots = 0;
f95f3850 2355
c91eab4b
TA
2356 if (!host->pdata) {
2357 host->pdata = dw_mci_parse_dt(host);
2358 if (IS_ERR(host->pdata)) {
2359 dev_err(host->dev, "platform data not available\n");
2360 return -EINVAL;
2361 }
f95f3850
WN
2362 }
2363
907abd51 2364 if (host->pdata->num_slots > 1) {
4a90920c 2365 dev_err(host->dev,
907abd51 2366 "Platform data must supply num_slots.\n");
62ca8034 2367 return -ENODEV;
f95f3850
WN
2368 }
2369
780f22af 2370 host->biu_clk = devm_clk_get(host->dev, "biu");
f90a0612
TA
2371 if (IS_ERR(host->biu_clk)) {
2372 dev_dbg(host->dev, "biu clock not available\n");
2373 } else {
2374 ret = clk_prepare_enable(host->biu_clk);
2375 if (ret) {
2376 dev_err(host->dev, "failed to enable biu clock\n");
f90a0612
TA
2377 return ret;
2378 }
2379 }
2380
780f22af 2381 host->ciu_clk = devm_clk_get(host->dev, "ciu");
f90a0612
TA
2382 if (IS_ERR(host->ciu_clk)) {
2383 dev_dbg(host->dev, "ciu clock not available\n");
3c6d89ea 2384 host->bus_hz = host->pdata->bus_hz;
f90a0612
TA
2385 } else {
2386 ret = clk_prepare_enable(host->ciu_clk);
2387 if (ret) {
2388 dev_err(host->dev, "failed to enable ciu clock\n");
f90a0612
TA
2389 goto err_clk_biu;
2390 }
f90a0612 2391
3c6d89ea
DA
2392 if (host->pdata->bus_hz) {
2393 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2394 if (ret)
2395 dev_warn(host->dev,
612de4c1 2396 "Unable to set bus rate to %uHz\n",
3c6d89ea
DA
2397 host->pdata->bus_hz);
2398 }
f90a0612 2399 host->bus_hz = clk_get_rate(host->ciu_clk);
3c6d89ea 2400 }
f90a0612 2401
612de4c1
JC
2402 if (!host->bus_hz) {
2403 dev_err(host->dev,
2404 "Platform data must supply bus speed\n");
2405 ret = -ENODEV;
2406 goto err_clk_ciu;
2407 }
2408
002f0d5c
YK
2409 if (drv_data && drv_data->init) {
2410 ret = drv_data->init(host);
2411 if (ret) {
2412 dev_err(host->dev,
2413 "implementation specific init failed\n");
2414 goto err_clk_ciu;
2415 }
2416 }
2417
cb27a843
JH
2418 if (drv_data && drv_data->setup_clock) {
2419 ret = drv_data->setup_clock(host);
800d78bf
TA
2420 if (ret) {
2421 dev_err(host->dev,
2422 "implementation specific clock setup failed\n");
2423 goto err_clk_ciu;
2424 }
2425 }
2426
a55d6ff0 2427 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
870556a3
DA
2428 if (IS_ERR(host->vmmc)) {
2429 ret = PTR_ERR(host->vmmc);
2430 if (ret == -EPROBE_DEFER)
2431 goto err_clk_ciu;
2432
2433 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2434 host->vmmc = NULL;
2435 } else {
2436 ret = regulator_enable(host->vmmc);
2437 if (ret) {
2438 if (ret != -EPROBE_DEFER)
2439 dev_err(host->dev,
2440 "regulator_enable fail: %d\n", ret);
2441 goto err_clk_ciu;
2442 }
2443 }
2444
62ca8034 2445 host->quirks = host->pdata->quirks;
f95f3850
WN
2446
2447 spin_lock_init(&host->lock);
2448 INIT_LIST_HEAD(&host->queue);
2449
f95f3850
WN
2450 /*
2451 * Get the host data width - this assumes that HCON has been set with
2452 * the correct values.
2453 */
2454 i = (mci_readl(host, HCON) >> 7) & 0x7;
2455 if (!i) {
2456 host->push_data = dw_mci_push_data16;
2457 host->pull_data = dw_mci_pull_data16;
2458 width = 16;
2459 host->data_shift = 1;
2460 } else if (i == 2) {
2461 host->push_data = dw_mci_push_data64;
2462 host->pull_data = dw_mci_pull_data64;
2463 width = 64;
2464 host->data_shift = 3;
2465 } else {
2466 /* Check for a reserved value, and warn if it is */
2467 WARN((i != 1),
2468 "HCON reports a reserved host data width!\n"
2469 "Defaulting to 32-bit access.\n");
2470 host->push_data = dw_mci_push_data32;
2471 host->pull_data = dw_mci_pull_data32;
2472 width = 32;
2473 host->data_shift = 2;
2474 }
2475
2476 /* Reset all blocks */
31bff450 2477 if (!dw_mci_ctrl_all_reset(host))
141a712a
SJ
2478 return -ENODEV;
2479
2480 host->dma_ops = host->pdata->dma_ops;
2481 dw_mci_init_dma(host);
f95f3850
WN
2482
2483 /* Clear the interrupts for the host controller */
2484 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2485 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2486
2487 /* Put in max timeout */
2488 mci_writel(host, TMOUT, 0xFFFFFFFF);
2489
2490 /*
2491 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2492 * Tx Mark = fifo_size / 2 DMA Size = 8
2493 */
b86d8253
JH
2494 if (!host->pdata->fifo_depth) {
2495 /*
2496 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2497 * have been overwritten by the bootloader, just like we're
2498 * about to do, so if you know the value for your hardware, you
2499 * should put it in the platform data.
2500 */
2501 fifo_size = mci_readl(host, FIFOTH);
8234e869 2502 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
b86d8253
JH
2503 } else {
2504 fifo_size = host->pdata->fifo_depth;
2505 }
2506 host->fifo_depth = fifo_size;
52426899
SJ
2507 host->fifoth_val =
2508 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
e61cf118 2509 mci_writel(host, FIFOTH, host->fifoth_val);
f95f3850
WN
2510
2511 /* disable clock to CIU */
2512 mci_writel(host, CLKENA, 0);
2513 mci_writel(host, CLKSRC, 0);
2514
63008768
JH
2515 /*
2516 * In 2.40a spec, Data offset is changed.
2517 * Need to check the version-id and set data-offset for DATA register.
2518 */
2519 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2520 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2521
2522 if (host->verid < DW_MMC_240A)
2523 host->data_offset = DATA_OFFSET;
2524 else
2525 host->data_offset = DATA_240A_OFFSET;
2526
f95f3850 2527 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
95dcc2cb 2528 host->card_workqueue = alloc_workqueue("dw-mci-card",
59ff3eb6 2529 WQ_MEM_RECLAIM, 1);
ef7aef9a
WY
2530 if (!host->card_workqueue) {
2531 ret = -ENOMEM;
1791b13e 2532 goto err_dmaunmap;
ef7aef9a 2533 }
1791b13e 2534 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
780f22af
SJ
2535 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2536 host->irq_flags, "dw-mci", host);
f95f3850 2537 if (ret)
1791b13e 2538 goto err_workqueue;
f95f3850 2539
f95f3850
WN
2540 if (host->pdata->num_slots)
2541 host->num_slots = host->pdata->num_slots;
2542 else
2543 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2544
2da1d7f2
YC
2545 /*
2546 * Enable interrupts for command done, data over, data empty, card det,
2547 * receive ready and error such as transmit, receive timeout, crc error
2548 */
2549 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2550 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2551 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2552 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2553 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2554
2555 dev_info(host->dev, "DW MMC controller at irq %d, "
2556 "%d bit host data width, "
2557 "%u deep fifo\n",
2558 host->irq, width, fifo_size);
2559
f95f3850
WN
2560 /* We need at least one slot to succeed */
2561 for (i = 0; i < host->num_slots; i++) {
2562 ret = dw_mci_init_slot(host, i);
1c2215b7
TA
2563 if (ret)
2564 dev_dbg(host->dev, "slot %d init failed\n", i);
2565 else
2566 init_slots++;
2567 }
2568
2569 if (init_slots) {
2570 dev_info(host->dev, "%d slots initialized\n", init_slots);
2571 } else {
2572 dev_dbg(host->dev, "attempted to initialize %d slots, "
2573 "but failed on all\n", host->num_slots);
780f22af 2574 goto err_workqueue;
f95f3850
WN
2575 }
2576
f95f3850 2577 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4a90920c 2578 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
f95f3850
WN
2579
2580 return 0;
2581
1791b13e 2582err_workqueue:
95dcc2cb 2583 destroy_workqueue(host->card_workqueue);
1791b13e 2584
f95f3850
WN
2585err_dmaunmap:
2586 if (host->use_dma && host->dma_ops->exit)
2587 host->dma_ops->exit(host);
780f22af 2588 if (host->vmmc)
c07946a3 2589 regulator_disable(host->vmmc);
f90a0612
TA
2590
2591err_clk_ciu:
780f22af 2592 if (!IS_ERR(host->ciu_clk))
f90a0612 2593 clk_disable_unprepare(host->ciu_clk);
780f22af 2594
f90a0612 2595err_clk_biu:
780f22af 2596 if (!IS_ERR(host->biu_clk))
f90a0612 2597 clk_disable_unprepare(host->biu_clk);
780f22af 2598
f95f3850
WN
2599 return ret;
2600}
62ca8034 2601EXPORT_SYMBOL(dw_mci_probe);
f95f3850 2602
/*
 * Tear down a controller registered with dw_mci_probe(): quiesce the
 * hardware, unregister every slot, then release workqueue, DMA,
 * regulator and clocks in reverse order of acquisition.
 */
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	/* Quiesce the controller before unregistering anything. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc)
		regulator_disable(host->vmmc);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
2635
2636
f95f3850 2637
#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	/* Cut card power; dw_mci_resume() re-enables and re-initializes. */
	if (host->vmmc)
		regulator_disable(host->vmmc);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
f95f3850 2650
/*
 * Undo dw_mci_suspend(): re-enable card power, reset the IP, re-init DMA
 * and restore register state, then re-program the bus for any slot that
 * kept power across suspend.
 */
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc) {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			dev_err(host->dev,
				"failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	/* A full controller/FIFO/DMA reset must succeed before going on. */
	if (!dw_mci_ctrl_all_reset(host)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/* Re-arm the same interrupt set that dw_mci_probe() enables. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}
	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */
2701
f95f3850
WN
static int __init dw_mci_init(void)
{
	/*
	 * Nothing to register here: the exported dw_mci_probe()/
	 * dw_mci_remove() are driven by the bus-specific glue drivers.
	 */
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
	/* Intentionally empty; present so the module can be unloaded. */
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");