]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/mmc/host/dw_mmc.c
mmc: dw_mmc: replace the bus_hz checking point
[mirror_ubuntu-bionic-kernel.git] / drivers / mmc / host / dw_mmc.c
CommitLineData
f95f3850
WN
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
f95f3850
WN
25#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/mmc.h>
90c2143a 32#include <linux/mmc/sdio.h>
f95f3850
WN
33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
c07946a3 35#include <linux/regulator/consumer.h>
1791b13e 36#include <linux/workqueue.h>
c91eab4b 37#include <linux/of.h>
55a6ceb2 38#include <linux/of_gpio.h>
bf626e55 39#include <linux/mmc/slot-gpio.h>
f95f3850
WN
40
41#include "dw_mmc.h"
42
43/* Common flag combinations */
3f7eec62 44#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
f95f3850
WN
45 SDMMC_INT_HTO | SDMMC_INT_SBE | \
46 SDMMC_INT_EBE)
47#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
48 SDMMC_INT_RESP_ERR)
49#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
50 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
51#define DW_MCI_SEND_STATUS 1
52#define DW_MCI_RECV_STATUS 2
53#define DW_MCI_DMA_THRESHOLD 16
54
1f44a2a5
SJ
55#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
56#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
57
f95f3850 58#ifdef CONFIG_MMC_DW_IDMAC
fc79a4d6
JS
59#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 SDMMC_IDMAC_INT_TI)
63
f95f3850
WN
64struct idmac_desc {
65 u32 des0; /* Control Descriptor */
66#define IDMAC_DES0_DIC BIT(1)
67#define IDMAC_DES0_LD BIT(2)
68#define IDMAC_DES0_FD BIT(3)
69#define IDMAC_DES0_CH BIT(4)
70#define IDMAC_DES0_ER BIT(5)
71#define IDMAC_DES0_CES BIT(30)
72#define IDMAC_DES0_OWN BIT(31)
73
74 u32 des1; /* Buffer sizes */
75#define IDMAC_SET_BUFFER1_SIZE(d, s) \
9b7bbe10 76 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
f95f3850
WN
77
78 u32 des2; /* buffer 1 physical address */
79
80 u32 des3; /* buffer 2 physical address */
81};
82#endif /* CONFIG_MMC_DW_IDMAC */
83
0976f16d
SJ
84static const u8 tuning_blk_pattern_4bit[] = {
85 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
86 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
87 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
88 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
89 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
90 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
91 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
92 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
93};
f95f3850 94
0976f16d
SJ
95static const u8 tuning_blk_pattern_8bit[] = {
96 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
97 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
98 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
99 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
100 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
101 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
102 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
103 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
104 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
105 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
106 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
107 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
108 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
109 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
110 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
111 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
f95f3850
WN
112};
113
31bff450
SJ
114static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
116
f95f3850
WN
117#if defined(CONFIG_DEBUG_FS)
118static int dw_mci_req_show(struct seq_file *s, void *v)
119{
120 struct dw_mci_slot *slot = s->private;
121 struct mmc_request *mrq;
122 struct mmc_command *cmd;
123 struct mmc_command *stop;
124 struct mmc_data *data;
125
126 /* Make sure we get a consistent snapshot */
127 spin_lock_bh(&slot->host->lock);
128 mrq = slot->mrq;
129
130 if (mrq) {
131 cmd = mrq->cmd;
132 data = mrq->data;
133 stop = mrq->stop;
134
135 if (cmd)
136 seq_printf(s,
137 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 cmd->opcode, cmd->arg, cmd->flags,
139 cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 cmd->resp[2], cmd->error);
141 if (data)
142 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 data->bytes_xfered, data->blocks,
144 data->blksz, data->flags, data->error);
145 if (stop)
146 seq_printf(s,
147 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 stop->opcode, stop->arg, stop->flags,
149 stop->resp[0], stop->resp[1], stop->resp[2],
150 stop->resp[2], stop->error);
151 }
152
153 spin_unlock_bh(&slot->host->lock);
154
155 return 0;
156}
157
158static int dw_mci_req_open(struct inode *inode, struct file *file)
159{
160 return single_open(file, dw_mci_req_show, inode->i_private);
161}
162
163static const struct file_operations dw_mci_req_fops = {
164 .owner = THIS_MODULE,
165 .open = dw_mci_req_open,
166 .read = seq_read,
167 .llseek = seq_lseek,
168 .release = single_release,
169};
170
171static int dw_mci_regs_show(struct seq_file *s, void *v)
172{
173 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
174 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
175 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
176 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
177 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
178 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
179
180 return 0;
181}
182
183static int dw_mci_regs_open(struct inode *inode, struct file *file)
184{
185 return single_open(file, dw_mci_regs_show, inode->i_private);
186}
187
188static const struct file_operations dw_mci_regs_fops = {
189 .owner = THIS_MODULE,
190 .open = dw_mci_regs_open,
191 .read = seq_read,
192 .llseek = seq_lseek,
193 .release = single_release,
194};
195
196static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
197{
198 struct mmc_host *mmc = slot->mmc;
199 struct dw_mci *host = slot->host;
200 struct dentry *root;
201 struct dentry *node;
202
203 root = mmc->debugfs_root;
204 if (!root)
205 return;
206
207 node = debugfs_create_file("regs", S_IRUSR, root, host,
208 &dw_mci_regs_fops);
209 if (!node)
210 goto err;
211
212 node = debugfs_create_file("req", S_IRUSR, root, slot,
213 &dw_mci_req_fops);
214 if (!node)
215 goto err;
216
217 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
218 if (!node)
219 goto err;
220
221 node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 (u32 *)&host->pending_events);
223 if (!node)
224 goto err;
225
226 node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 (u32 *)&host->completed_events);
228 if (!node)
229 goto err;
230
231 return;
232
233err:
234 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
235}
236#endif /* defined(CONFIG_DEBUG_FS) */
237
238static void dw_mci_set_timeout(struct dw_mci *host)
239{
240 /* timeout (maximum) */
241 mci_writel(host, TMOUT, 0xffffffff);
242}
243
244static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
245{
246 struct mmc_data *data;
800d78bf 247 struct dw_mci_slot *slot = mmc_priv(mmc);
e95baf13 248 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
f95f3850
WN
249 u32 cmdr;
250 cmd->error = -EINPROGRESS;
251
252 cmdr = cmd->opcode;
253
90c2143a
SJ
254 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
255 cmd->opcode == MMC_GO_IDLE_STATE ||
256 cmd->opcode == MMC_GO_INACTIVE_STATE ||
257 (cmd->opcode == SD_IO_RW_DIRECT &&
258 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
f95f3850
WN
259 cmdr |= SDMMC_CMD_STOP;
260 else
90c2143a
SJ
261 if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
262 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
f95f3850
WN
263
264 if (cmd->flags & MMC_RSP_PRESENT) {
265 /* We expect a response, so set this bit */
266 cmdr |= SDMMC_CMD_RESP_EXP;
267 if (cmd->flags & MMC_RSP_136)
268 cmdr |= SDMMC_CMD_RESP_LONG;
269 }
270
271 if (cmd->flags & MMC_RSP_CRC)
272 cmdr |= SDMMC_CMD_RESP_CRC;
273
274 data = cmd->data;
275 if (data) {
276 cmdr |= SDMMC_CMD_DAT_EXP;
277 if (data->flags & MMC_DATA_STREAM)
278 cmdr |= SDMMC_CMD_STRM_MODE;
279 if (data->flags & MMC_DATA_WRITE)
280 cmdr |= SDMMC_CMD_DAT_WR;
281 }
282
cb27a843
JH
283 if (drv_data && drv_data->prepare_command)
284 drv_data->prepare_command(slot->host, &cmdr);
800d78bf 285
f95f3850
WN
286 return cmdr;
287}
288
90c2143a
SJ
289static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
290{
291 struct mmc_command *stop;
292 u32 cmdr;
293
294 if (!cmd->data)
295 return 0;
296
297 stop = &host->stop_abort;
298 cmdr = cmd->opcode;
299 memset(stop, 0, sizeof(struct mmc_command));
300
301 if (cmdr == MMC_READ_SINGLE_BLOCK ||
302 cmdr == MMC_READ_MULTIPLE_BLOCK ||
303 cmdr == MMC_WRITE_BLOCK ||
304 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
305 stop->opcode = MMC_STOP_TRANSMISSION;
306 stop->arg = 0;
307 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
308 } else if (cmdr == SD_IO_RW_EXTENDED) {
309 stop->opcode = SD_IO_RW_DIRECT;
310 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
311 ((cmd->arg >> 28) & 0x7);
312 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
313 } else {
314 return 0;
315 }
316
317 cmdr = stop->opcode | SDMMC_CMD_STOP |
318 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
319
320 return cmdr;
321}
322
f95f3850
WN
323static void dw_mci_start_command(struct dw_mci *host,
324 struct mmc_command *cmd, u32 cmd_flags)
325{
326 host->cmd = cmd;
4a90920c 327 dev_vdbg(host->dev,
f95f3850
WN
328 "start command: ARGR=0x%08x CMDR=0x%08x\n",
329 cmd->arg, cmd_flags);
330
331 mci_writel(host, CMDARG, cmd->arg);
332 wmb();
333
334 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
335}
336
90c2143a 337static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
f95f3850 338{
90c2143a
SJ
339 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
340 dw_mci_start_command(host, stop, host->stop_cmdr);
f95f3850
WN
341}
342
343/* DMA interface functions */
344static void dw_mci_stop_dma(struct dw_mci *host)
345{
03e8cb53 346 if (host->using_dma) {
f95f3850
WN
347 host->dma_ops->stop(host);
348 host->dma_ops->cleanup(host);
f95f3850 349 }
aa50f259
SJ
350
351 /* Data transfer was stopped by the interrupt handler */
352 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
f95f3850
WN
353}
354
9aa51408
SJ
355static int dw_mci_get_dma_dir(struct mmc_data *data)
356{
357 if (data->flags & MMC_DATA_WRITE)
358 return DMA_TO_DEVICE;
359 else
360 return DMA_FROM_DEVICE;
361}
362
9beee912 363#ifdef CONFIG_MMC_DW_IDMAC
f95f3850
WN
364static void dw_mci_dma_cleanup(struct dw_mci *host)
365{
366 struct mmc_data *data = host->data;
367
368 if (data)
9aa51408 369 if (!data->host_cookie)
4a90920c 370 dma_unmap_sg(host->dev,
9aa51408
SJ
371 data->sg,
372 data->sg_len,
373 dw_mci_get_dma_dir(data));
f95f3850
WN
374}
375
5ce9d961
SJ
376static void dw_mci_idmac_reset(struct dw_mci *host)
377{
378 u32 bmod = mci_readl(host, BMOD);
379 /* Software reset of DMA */
380 bmod |= SDMMC_IDMAC_SWRESET;
381 mci_writel(host, BMOD, bmod);
382}
383
f95f3850
WN
384static void dw_mci_idmac_stop_dma(struct dw_mci *host)
385{
386 u32 temp;
387
388 /* Disable and reset the IDMAC interface */
389 temp = mci_readl(host, CTRL);
390 temp &= ~SDMMC_CTRL_USE_IDMAC;
391 temp |= SDMMC_CTRL_DMA_RESET;
392 mci_writel(host, CTRL, temp);
393
394 /* Stop the IDMAC running */
395 temp = mci_readl(host, BMOD);
a5289a43 396 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
5ce9d961 397 temp |= SDMMC_IDMAC_SWRESET;
f95f3850
WN
398 mci_writel(host, BMOD, temp);
399}
400
401static void dw_mci_idmac_complete_dma(struct dw_mci *host)
402{
403 struct mmc_data *data = host->data;
404
4a90920c 405 dev_vdbg(host->dev, "DMA complete\n");
f95f3850
WN
406
407 host->dma_ops->cleanup(host);
408
409 /*
410 * If the card was removed, data will be NULL. No point in trying to
411 * send the stop command or waiting for NBUSY in this case.
412 */
413 if (data) {
414 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
415 tasklet_schedule(&host->tasklet);
416 }
417}
418
419static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
420 unsigned int sg_len)
421{
422 int i;
423 struct idmac_desc *desc = host->sg_cpu;
424
425 for (i = 0; i < sg_len; i++, desc++) {
426 unsigned int length = sg_dma_len(&data->sg[i]);
427 u32 mem_addr = sg_dma_address(&data->sg[i]);
428
429 /* Set the OWN bit and disable interrupts for this descriptor */
430 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
431
432 /* Buffer length */
433 IDMAC_SET_BUFFER1_SIZE(desc, length);
434
435 /* Physical address to DMA to/from */
436 desc->des2 = mem_addr;
437 }
438
439 /* Set first descriptor */
440 desc = host->sg_cpu;
441 desc->des0 |= IDMAC_DES0_FD;
442
443 /* Set last descriptor */
444 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
445 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
446 desc->des0 |= IDMAC_DES0_LD;
447
448 wmb();
449}
450
451static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
452{
453 u32 temp;
454
455 dw_mci_translate_sglist(host, host->data, sg_len);
456
457 /* Select IDMAC interface */
458 temp = mci_readl(host, CTRL);
459 temp |= SDMMC_CTRL_USE_IDMAC;
460 mci_writel(host, CTRL, temp);
461
462 wmb();
463
464 /* Enable the IDMAC */
465 temp = mci_readl(host, BMOD);
a5289a43 466 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
f95f3850
WN
467 mci_writel(host, BMOD, temp);
468
469 /* Start it running */
470 mci_writel(host, PLDMND, 1);
471}
472
473static int dw_mci_idmac_init(struct dw_mci *host)
474{
475 struct idmac_desc *p;
897b69e7 476 int i;
f95f3850
WN
477
478 /* Number of descriptors in the ring buffer */
479 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
480
481 /* Forward link the descriptor list */
482 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
483 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
484
485 /* Set the last descriptor as the end-of-ring descriptor */
486 p->des3 = host->sg_dma;
487 p->des0 = IDMAC_DES0_ER;
488
5ce9d961 489 dw_mci_idmac_reset(host);
141a712a 490
f95f3850 491 /* Mask out interrupts - get Tx & Rx complete only */
fc79a4d6 492 mci_writel(host, IDSTS, IDMAC_INT_CLR);
f95f3850
WN
493 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
494 SDMMC_IDMAC_INT_TI);
495
496 /* Set the descriptor base address */
497 mci_writel(host, DBADDR, host->sg_dma);
498 return 0;
499}
500
8e2b36ea 501static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
885c3e80
SJ
502 .init = dw_mci_idmac_init,
503 .start = dw_mci_idmac_start_dma,
504 .stop = dw_mci_idmac_stop_dma,
505 .complete = dw_mci_idmac_complete_dma,
506 .cleanup = dw_mci_dma_cleanup,
507};
508#endif /* CONFIG_MMC_DW_IDMAC */
509
9aa51408
SJ
510static int dw_mci_pre_dma_transfer(struct dw_mci *host,
511 struct mmc_data *data,
512 bool next)
f95f3850
WN
513{
514 struct scatterlist *sg;
9aa51408 515 unsigned int i, sg_len;
03e8cb53 516
9aa51408
SJ
517 if (!next && data->host_cookie)
518 return data->host_cookie;
f95f3850
WN
519
520 /*
521 * We don't do DMA on "complex" transfers, i.e. with
522 * non-word-aligned buffers or lengths. Also, we don't bother
523 * with all the DMA setup overhead for short transfers.
524 */
525 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
526 return -EINVAL;
9aa51408 527
f95f3850
WN
528 if (data->blksz & 3)
529 return -EINVAL;
530
531 for_each_sg(data->sg, sg, data->sg_len, i) {
532 if (sg->offset & 3 || sg->length & 3)
533 return -EINVAL;
534 }
535
4a90920c 536 sg_len = dma_map_sg(host->dev,
9aa51408
SJ
537 data->sg,
538 data->sg_len,
539 dw_mci_get_dma_dir(data));
540 if (sg_len == 0)
541 return -EINVAL;
03e8cb53 542
9aa51408
SJ
543 if (next)
544 data->host_cookie = sg_len;
f95f3850 545
9aa51408
SJ
546 return sg_len;
547}
548
9aa51408
SJ
549static void dw_mci_pre_req(struct mmc_host *mmc,
550 struct mmc_request *mrq,
551 bool is_first_req)
552{
553 struct dw_mci_slot *slot = mmc_priv(mmc);
554 struct mmc_data *data = mrq->data;
555
556 if (!slot->host->use_dma || !data)
557 return;
558
559 if (data->host_cookie) {
560 data->host_cookie = 0;
561 return;
562 }
563
564 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
565 data->host_cookie = 0;
566}
567
568static void dw_mci_post_req(struct mmc_host *mmc,
569 struct mmc_request *mrq,
570 int err)
571{
572 struct dw_mci_slot *slot = mmc_priv(mmc);
573 struct mmc_data *data = mrq->data;
574
575 if (!slot->host->use_dma || !data)
576 return;
577
578 if (data->host_cookie)
4a90920c 579 dma_unmap_sg(slot->host->dev,
9aa51408
SJ
580 data->sg,
581 data->sg_len,
582 dw_mci_get_dma_dir(data));
583 data->host_cookie = 0;
584}
585
52426899
SJ
/*
 * Pick MSIZE and the RX/TX watermarks for the given block size and
 * program FIFOTH accordingly.  Only meaningful with the IDMAC; a no-op
 * otherwise.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	/* Choose the largest burst size that divides both quantities. */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
626
f1d2736c
SJ
627static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
628{
629 unsigned int blksz = data->blksz;
630 u32 blksz_depth, fifo_depth;
631 u16 thld_size;
632
633 WARN_ON(!(data->flags & MMC_DATA_READ));
634
635 if (host->timing != MMC_TIMING_MMC_HS200 &&
636 host->timing != MMC_TIMING_UHS_SDR104)
637 goto disable;
638
639 blksz_depth = blksz / (1 << host->data_shift);
640 fifo_depth = host->fifo_depth;
641
642 if (blksz_depth > fifo_depth)
643 goto disable;
644
645 /*
646 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
647 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
648 * Currently just choose blksz.
649 */
650 thld_size = blksz;
651 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
652 return;
653
654disable:
655 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
656}
657
9aa51408
SJ
658static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
659{
660 int sg_len;
661 u32 temp;
662
663 host->using_dma = 0;
664
665 /* If we don't have a channel, we can't do DMA */
666 if (!host->use_dma)
667 return -ENODEV;
668
669 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
a99aa9b9
SJ
670 if (sg_len < 0) {
671 host->dma_ops->stop(host);
9aa51408 672 return sg_len;
a99aa9b9 673 }
9aa51408
SJ
674
675 host->using_dma = 1;
f95f3850 676
4a90920c 677 dev_vdbg(host->dev,
f95f3850
WN
678 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
679 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
680 sg_len);
681
52426899
SJ
682 /*
683 * Decide the MSIZE and RX/TX Watermark.
684 * If current block size is same with previous size,
685 * no need to update fifoth.
686 */
687 if (host->prev_blksz != data->blksz)
688 dw_mci_adjust_fifoth(host, data);
689
f95f3850
WN
690 /* Enable the DMA interface */
691 temp = mci_readl(host, CTRL);
692 temp |= SDMMC_CTRL_DMA_ENABLE;
693 mci_writel(host, CTRL, temp);
694
695 /* Disable RX/TX IRQs, let DMA handle it */
696 temp = mci_readl(host, INTMASK);
697 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
698 mci_writel(host, INTMASK, temp);
699
700 host->dma_ops->start(host, sg_len);
701
702 return 0;
703}
704
705static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
706{
707 u32 temp;
708
709 data->error = -EINPROGRESS;
710
711 WARN_ON(host->data);
712 host->sg = NULL;
713 host->data = data;
714
f1d2736c 715 if (data->flags & MMC_DATA_READ) {
55c5efbc 716 host->dir_status = DW_MCI_RECV_STATUS;
f1d2736c
SJ
717 dw_mci_ctrl_rd_thld(host, data);
718 } else {
55c5efbc 719 host->dir_status = DW_MCI_SEND_STATUS;
f1d2736c 720 }
55c5efbc 721
f95f3850 722 if (dw_mci_submit_data_dma(host, data)) {
f9c2a0dc
SJ
723 int flags = SG_MITER_ATOMIC;
724 if (host->data->flags & MMC_DATA_READ)
725 flags |= SG_MITER_TO_SG;
726 else
727 flags |= SG_MITER_FROM_SG;
728
729 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
f95f3850 730 host->sg = data->sg;
34b664a2
JH
731 host->part_buf_start = 0;
732 host->part_buf_count = 0;
f95f3850 733
b40af3aa 734 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
f95f3850
WN
735 temp = mci_readl(host, INTMASK);
736 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
737 mci_writel(host, INTMASK, temp);
738
739 temp = mci_readl(host, CTRL);
740 temp &= ~SDMMC_CTRL_DMA_ENABLE;
741 mci_writel(host, CTRL, temp);
52426899
SJ
742
743 /*
744 * Use the initial fifoth_val for PIO mode.
745 * If next issued data may be transfered by DMA mode,
746 * prev_blksz should be invalidated.
747 */
748 mci_writel(host, FIFOTH, host->fifoth_val);
749 host->prev_blksz = 0;
750 } else {
751 /*
752 * Keep the current block size.
753 * It will be used to decide whether to update
754 * fifoth register next time.
755 */
756 host->prev_blksz = data->blksz;
f95f3850
WN
757 }
758}
759
760static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
761{
762 struct dw_mci *host = slot->host;
763 unsigned long timeout = jiffies + msecs_to_jiffies(500);
764 unsigned int cmd_status = 0;
765
766 mci_writel(host, CMDARG, arg);
767 wmb();
768 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
769
770 while (time_before(jiffies, timeout)) {
771 cmd_status = mci_readl(host, CMD);
772 if (!(cmd_status & SDMMC_CMD_START))
773 return;
774 }
775 dev_err(&slot->mmc->class_dev,
776 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
777 cmd, arg, cmd_status);
778}
779
ab269128 780static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
f95f3850
WN
781{
782 struct dw_mci *host = slot->host;
fdf492a1 783 unsigned int clock = slot->clock;
f95f3850 784 u32 div;
9623b5b9 785 u32 clk_en_a;
f95f3850 786
fdf492a1
DA
787 if (!clock) {
788 mci_writel(host, CLKENA, 0);
789 mci_send_cmd(slot,
790 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
791 } else if (clock != host->current_speed || force_clkinit) {
792 div = host->bus_hz / clock;
793 if (host->bus_hz % clock && host->bus_hz > clock)
f95f3850
WN
794 /*
795 * move the + 1 after the divide to prevent
796 * over-clocking the card.
797 */
e419990b
SJ
798 div += 1;
799
fdf492a1 800 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
f95f3850 801
fdf492a1
DA
802 if ((clock << div) != slot->__clk_old || force_clkinit)
803 dev_info(&slot->mmc->class_dev,
804 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
805 slot->id, host->bus_hz, clock,
806 div ? ((host->bus_hz / div) >> 1) :
807 host->bus_hz, div);
f95f3850
WN
808
809 /* disable clock */
810 mci_writel(host, CLKENA, 0);
811 mci_writel(host, CLKSRC, 0);
812
813 /* inform CIU */
814 mci_send_cmd(slot,
815 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
816
817 /* set clock to desired speed */
818 mci_writel(host, CLKDIV, div);
819
820 /* inform CIU */
821 mci_send_cmd(slot,
822 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
823
9623b5b9
DA
824 /* enable clock; only low power if no SDIO */
825 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
826 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
827 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
828 mci_writel(host, CLKENA, clk_en_a);
f95f3850
WN
829
830 /* inform CIU */
831 mci_send_cmd(slot,
832 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
833
fdf492a1
DA
834 /* keep the clock with reflecting clock dividor */
835 slot->__clk_old = clock << div;
f95f3850
WN
836 }
837
fdf492a1
DA
838 host->current_speed = clock;
839
f95f3850 840 /* Set the current slot bus width */
1d56c453 841 mci_writel(host, CTYPE, (slot->ctype << slot->id));
f95f3850
WN
842}
843
053b3ce6
SJ
844static void __dw_mci_start_request(struct dw_mci *host,
845 struct dw_mci_slot *slot,
846 struct mmc_command *cmd)
f95f3850
WN
847{
848 struct mmc_request *mrq;
f95f3850
WN
849 struct mmc_data *data;
850 u32 cmdflags;
851
852 mrq = slot->mrq;
853 if (host->pdata->select_slot)
854 host->pdata->select_slot(slot->id);
855
f95f3850
WN
856 host->cur_slot = slot;
857 host->mrq = mrq;
858
859 host->pending_events = 0;
860 host->completed_events = 0;
e352c813 861 host->cmd_status = 0;
f95f3850 862 host->data_status = 0;
e352c813 863 host->dir_status = 0;
f95f3850 864
053b3ce6 865 data = cmd->data;
f95f3850
WN
866 if (data) {
867 dw_mci_set_timeout(host);
868 mci_writel(host, BYTCNT, data->blksz*data->blocks);
869 mci_writel(host, BLKSIZ, data->blksz);
870 }
871
f95f3850
WN
872 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
873
874 /* this is the first command, send the initialization clock */
875 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
876 cmdflags |= SDMMC_CMD_INIT;
877
878 if (data) {
879 dw_mci_submit_data(host, data);
880 wmb();
881 }
882
883 dw_mci_start_command(host, cmd, cmdflags);
884
885 if (mrq->stop)
886 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
90c2143a
SJ
887 else
888 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
f95f3850
WN
889}
890
053b3ce6
SJ
891static void dw_mci_start_request(struct dw_mci *host,
892 struct dw_mci_slot *slot)
893{
894 struct mmc_request *mrq = slot->mrq;
895 struct mmc_command *cmd;
896
897 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
898 __dw_mci_start_request(host, slot, cmd);
899}
900
7456caae 901/* must be called with host->lock held */
f95f3850
WN
902static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
903 struct mmc_request *mrq)
904{
905 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
906 host->state);
907
f95f3850
WN
908 slot->mrq = mrq;
909
910 if (host->state == STATE_IDLE) {
911 host->state = STATE_SENDING_CMD;
912 dw_mci_start_request(host, slot);
913 } else {
914 list_add_tail(&slot->queue_node, &host->queue);
915 }
f95f3850
WN
916}
917
918static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
919{
920 struct dw_mci_slot *slot = mmc_priv(mmc);
921 struct dw_mci *host = slot->host;
922
923 WARN_ON(slot->mrq);
924
7456caae
JH
925 /*
926 * The check for card presence and queueing of the request must be
927 * atomic, otherwise the card could be removed in between and the
928 * request wouldn't fail until another card was inserted.
929 */
930 spin_lock_bh(&host->lock);
931
f95f3850 932 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
7456caae 933 spin_unlock_bh(&host->lock);
f95f3850
WN
934 mrq->cmd->error = -ENOMEDIUM;
935 mmc_request_done(mmc, mrq);
936 return;
937 }
938
f95f3850 939 dw_mci_queue_request(host, slot, mrq);
7456caae
JH
940
941 spin_unlock_bh(&host->lock);
f95f3850
WN
942}
943
944static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
945{
946 struct dw_mci_slot *slot = mmc_priv(mmc);
e95baf13 947 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
41babf75 948 u32 regs;
f95f3850 949
f95f3850 950 switch (ios->bus_width) {
f95f3850
WN
951 case MMC_BUS_WIDTH_4:
952 slot->ctype = SDMMC_CTYPE_4BIT;
953 break;
c9b2a06f
JC
954 case MMC_BUS_WIDTH_8:
955 slot->ctype = SDMMC_CTYPE_8BIT;
956 break;
b2f7cb45
JC
957 default:
958 /* set default 1 bit mode */
959 slot->ctype = SDMMC_CTYPE_1BIT;
f95f3850
WN
960 }
961
3f514291
SJ
962 regs = mci_readl(slot->host, UHS_REG);
963
41babf75 964 /* DDR mode set */
cab3a802 965 if (ios->timing == MMC_TIMING_MMC_DDR52)
c69042a5 966 regs |= ((0x1 << slot->id) << 16);
3f514291 967 else
c69042a5 968 regs &= ~((0x1 << slot->id) << 16);
3f514291
SJ
969
970 mci_writel(slot->host, UHS_REG, regs);
f1d2736c 971 slot->host->timing = ios->timing;
41babf75 972
fdf492a1
DA
973 /*
974 * Use mirror of ios->clock to prevent race with mmc
975 * core ios update when finding the minimum.
976 */
977 slot->clock = ios->clock;
f95f3850 978
cb27a843
JH
979 if (drv_data && drv_data->set_ios)
980 drv_data->set_ios(slot->host, ios);
800d78bf 981
bf7cb224
JC
982 /* Slot specific timing and width adjustment */
983 dw_mci_setup_bus(slot, false);
984
f95f3850
WN
985 switch (ios->power_mode) {
986 case MMC_POWER_UP:
987 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
e6f34e2f
JH
988 /* Power up slot */
989 if (slot->host->pdata->setpower)
990 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
4366dcc5
JC
991 regs = mci_readl(slot->host, PWREN);
992 regs |= (1 << slot->id);
993 mci_writel(slot->host, PWREN, regs);
e6f34e2f
JH
994 break;
995 case MMC_POWER_OFF:
996 /* Power down slot */
997 if (slot->host->pdata->setpower)
998 slot->host->pdata->setpower(slot->id, 0);
4366dcc5
JC
999 regs = mci_readl(slot->host, PWREN);
1000 regs &= ~(1 << slot->id);
1001 mci_writel(slot->host, PWREN, regs);
f95f3850
WN
1002 break;
1003 default:
1004 break;
1005 }
1006}
1007
1008static int dw_mci_get_ro(struct mmc_host *mmc)
1009{
1010 int read_only;
1011 struct dw_mci_slot *slot = mmc_priv(mmc);
1012 struct dw_mci_board *brd = slot->host->pdata;
1013
1014 /* Use platform get_ro function, else try on board write protect */
9640639b 1015 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
b4967aa5
TA
1016 read_only = 0;
1017 else if (brd->get_ro)
f95f3850 1018 read_only = brd->get_ro(slot->id);
55a6ceb2
DA
1019 else if (gpio_is_valid(slot->wp_gpio))
1020 read_only = gpio_get_value(slot->wp_gpio);
f95f3850
WN
1021 else
1022 read_only =
1023 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1024
1025 dev_dbg(&mmc->class_dev, "card is %s\n",
1026 read_only ? "read-only" : "read-write");
1027
1028 return read_only;
1029}
1030
1031static int dw_mci_get_cd(struct mmc_host *mmc)
1032{
1033 int present;
1034 struct dw_mci_slot *slot = mmc_priv(mmc);
1035 struct dw_mci_board *brd = slot->host->pdata;
7cf347bd
ZG
1036 struct dw_mci *host = slot->host;
1037 int gpio_cd = mmc_gpio_get_cd(mmc);
f95f3850
WN
1038
1039 /* Use platform get_cd function, else try onboard card detect */
fc3d7720
JC
1040 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1041 present = 1;
1042 else if (brd->get_cd)
f95f3850 1043 present = !brd->get_cd(slot->id);
bf626e55 1044 else if (!IS_ERR_VALUE(gpio_cd))
7cf347bd 1045 present = gpio_cd;
f95f3850
WN
1046 else
1047 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1048 == 0 ? 1 : 0;
1049
7cf347bd 1050 spin_lock_bh(&host->lock);
bf626e55
ZG
1051 if (present) {
1052 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
f95f3850 1053 dev_dbg(&mmc->class_dev, "card is present\n");
bf626e55
ZG
1054 } else {
1055 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
f95f3850 1056 dev_dbg(&mmc->class_dev, "card is not present\n");
bf626e55 1057 }
7cf347bd 1058 spin_unlock_bh(&host->lock);
f95f3850
WN
1059
1060 return present;
1061}
1062
9623b5b9
DA
1063/*
1064 * Disable lower power mode.
1065 *
1066 * Low power mode will stop the card clock when idle. According to the
1067 * description of the CLKENA register we should disable low power mode
1068 * for SDIO cards if we need SDIO interrupts to work.
1069 *
1070 * This function is fast if low power mode is already disabled.
1071 */
1072static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1073{
1074 struct dw_mci *host = slot->host;
1075 u32 clk_en_a;
1076 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1077
1078 clk_en_a = mci_readl(host, CLKENA);
1079
1080 if (clk_en_a & clken_low_pwr) {
1081 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1082 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1083 SDMMC_CMD_PRV_DAT_WAIT, 0);
1084 }
1085}
1086
1a5c8e1f
SH
1087static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1088{
1089 struct dw_mci_slot *slot = mmc_priv(mmc);
1090 struct dw_mci *host = slot->host;
1091 u32 int_mask;
1092
1093 /* Enable/disable Slot Specific SDIO interrupt */
1094 int_mask = mci_readl(host, INTMASK);
1095 if (enb) {
9623b5b9
DA
1096 /*
1097 * Turn off low power mode if it was enabled. This is a bit of
1098 * a heavy operation and we disable / enable IRQs a lot, so
1099 * we'll leave low power mode disabled and it will get
1100 * re-enabled again in dw_mci_setup_bus().
1101 */
1102 dw_mci_disable_low_power(slot);
1103
1a5c8e1f 1104 mci_writel(host, INTMASK,
705ad047 1105 (int_mask | SDMMC_INT_SDIO(slot->id)));
1a5c8e1f
SH
1106 } else {
1107 mci_writel(host, INTMASK,
705ad047 1108 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1a5c8e1f
SH
1109 }
1110}
1111
0976f16d
SJ
1112static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1113{
1114 struct dw_mci_slot *slot = mmc_priv(mmc);
1115 struct dw_mci *host = slot->host;
1116 const struct dw_mci_drv_data *drv_data = host->drv_data;
1117 struct dw_mci_tuning_data tuning_data;
1118 int err = -ENOSYS;
1119
1120 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1121 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1122 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1123 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1124 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1125 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1126 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1127 } else {
1128 return -EINVAL;
1129 }
1130 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1131 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1132 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1133 } else {
1134 dev_err(host->dev,
1135 "Undefined command(%d) for tuning\n", opcode);
1136 return -EINVAL;
1137 }
1138
1139 if (drv_data && drv_data->execute_tuning)
1140 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1141 return err;
1142}
1143
f95f3850 1144static const struct mmc_host_ops dw_mci_ops = {
1a5c8e1f 1145 .request = dw_mci_request,
9aa51408
SJ
1146 .pre_req = dw_mci_pre_req,
1147 .post_req = dw_mci_post_req,
1a5c8e1f
SH
1148 .set_ios = dw_mci_set_ios,
1149 .get_ro = dw_mci_get_ro,
1150 .get_cd = dw_mci_get_cd,
1151 .enable_sdio_irq = dw_mci_enable_sdio_irq,
0976f16d 1152 .execute_tuning = dw_mci_execute_tuning,
f95f3850
WN
1153};
1154
1155static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1156 __releases(&host->lock)
1157 __acquires(&host->lock)
1158{
1159 struct dw_mci_slot *slot;
1160 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1161
1162 WARN_ON(host->cmd || host->data);
1163
1164 host->cur_slot->mrq = NULL;
1165 host->mrq = NULL;
1166 if (!list_empty(&host->queue)) {
1167 slot = list_entry(host->queue.next,
1168 struct dw_mci_slot, queue_node);
1169 list_del(&slot->queue_node);
4a90920c 1170 dev_vdbg(host->dev, "list not empty: %s is next\n",
f95f3850
WN
1171 mmc_hostname(slot->mmc));
1172 host->state = STATE_SENDING_CMD;
1173 dw_mci_start_request(host, slot);
1174 } else {
4a90920c 1175 dev_vdbg(host->dev, "list empty\n");
f95f3850
WN
1176 host->state = STATE_IDLE;
1177 }
1178
1179 spin_unlock(&host->lock);
1180 mmc_request_done(prev_mmc, mrq);
1181 spin_lock(&host->lock);
1182}
1183
e352c813 1184static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
f95f3850
WN
1185{
1186 u32 status = host->cmd_status;
1187
1188 host->cmd_status = 0;
1189
1190 /* Read the response from the card (up to 16 bytes) */
1191 if (cmd->flags & MMC_RSP_PRESENT) {
1192 if (cmd->flags & MMC_RSP_136) {
1193 cmd->resp[3] = mci_readl(host, RESP0);
1194 cmd->resp[2] = mci_readl(host, RESP1);
1195 cmd->resp[1] = mci_readl(host, RESP2);
1196 cmd->resp[0] = mci_readl(host, RESP3);
1197 } else {
1198 cmd->resp[0] = mci_readl(host, RESP0);
1199 cmd->resp[1] = 0;
1200 cmd->resp[2] = 0;
1201 cmd->resp[3] = 0;
1202 }
1203 }
1204
1205 if (status & SDMMC_INT_RTO)
1206 cmd->error = -ETIMEDOUT;
1207 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1208 cmd->error = -EILSEQ;
1209 else if (status & SDMMC_INT_RESP_ERR)
1210 cmd->error = -EIO;
1211 else
1212 cmd->error = 0;
1213
1214 if (cmd->error) {
1215 /* newer ip versions need a delay between retries */
1216 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1217 mdelay(20);
f95f3850 1218 }
e352c813
SJ
1219
1220 return cmd->error;
1221}
1222
1223static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1224{
31bff450 1225 u32 status = host->data_status;
e352c813
SJ
1226
1227 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1228 if (status & SDMMC_INT_DRTO) {
1229 data->error = -ETIMEDOUT;
1230 } else if (status & SDMMC_INT_DCRC) {
1231 data->error = -EILSEQ;
1232 } else if (status & SDMMC_INT_EBE) {
1233 if (host->dir_status ==
1234 DW_MCI_SEND_STATUS) {
1235 /*
1236 * No data CRC status was returned.
1237 * The number of bytes transferred
1238 * will be exaggerated in PIO mode.
1239 */
1240 data->bytes_xfered = 0;
1241 data->error = -ETIMEDOUT;
1242 } else if (host->dir_status ==
1243 DW_MCI_RECV_STATUS) {
1244 data->error = -EIO;
1245 }
1246 } else {
1247 /* SDMMC_INT_SBE is included */
1248 data->error = -EIO;
1249 }
1250
1251 dev_err(host->dev, "data error, status 0x%08x\n", status);
1252
1253 /*
1254 * After an error, there may be data lingering
31bff450 1255 * in the FIFO
e352c813 1256 */
31bff450 1257 dw_mci_fifo_reset(host);
e352c813
SJ
1258 } else {
1259 data->bytes_xfered = data->blocks * data->blksz;
1260 data->error = 0;
1261 }
1262
1263 return data->error;
f95f3850
WN
1264}
1265
1266static void dw_mci_tasklet_func(unsigned long priv)
1267{
1268 struct dw_mci *host = (struct dw_mci *)priv;
1269 struct mmc_data *data;
1270 struct mmc_command *cmd;
e352c813 1271 struct mmc_request *mrq;
f95f3850
WN
1272 enum dw_mci_state state;
1273 enum dw_mci_state prev_state;
e352c813 1274 unsigned int err;
f95f3850
WN
1275
1276 spin_lock(&host->lock);
1277
1278 state = host->state;
1279 data = host->data;
e352c813 1280 mrq = host->mrq;
f95f3850
WN
1281
1282 do {
1283 prev_state = state;
1284
1285 switch (state) {
1286 case STATE_IDLE:
1287 break;
1288
1289 case STATE_SENDING_CMD:
1290 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1291 &host->pending_events))
1292 break;
1293
1294 cmd = host->cmd;
1295 host->cmd = NULL;
1296 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
e352c813
SJ
1297 err = dw_mci_command_complete(host, cmd);
1298 if (cmd == mrq->sbc && !err) {
053b3ce6
SJ
1299 prev_state = state = STATE_SENDING_CMD;
1300 __dw_mci_start_request(host, host->cur_slot,
e352c813 1301 mrq->cmd);
053b3ce6
SJ
1302 goto unlock;
1303 }
1304
e352c813 1305 if (cmd->data && err) {
71abb133 1306 dw_mci_stop_dma(host);
90c2143a
SJ
1307 send_stop_abort(host, data);
1308 state = STATE_SENDING_STOP;
1309 break;
71abb133
SJ
1310 }
1311
e352c813
SJ
1312 if (!cmd->data || err) {
1313 dw_mci_request_end(host, mrq);
f95f3850
WN
1314 goto unlock;
1315 }
1316
1317 prev_state = state = STATE_SENDING_DATA;
1318 /* fall through */
1319
1320 case STATE_SENDING_DATA:
1321 if (test_and_clear_bit(EVENT_DATA_ERROR,
1322 &host->pending_events)) {
1323 dw_mci_stop_dma(host);
90c2143a 1324 send_stop_abort(host, data);
f95f3850
WN
1325 state = STATE_DATA_ERROR;
1326 break;
1327 }
1328
1329 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1330 &host->pending_events))
1331 break;
1332
1333 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1334 prev_state = state = STATE_DATA_BUSY;
1335 /* fall through */
1336
1337 case STATE_DATA_BUSY:
1338 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1339 &host->pending_events))
1340 break;
1341
1342 host->data = NULL;
1343 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
e352c813
SJ
1344 err = dw_mci_data_complete(host, data);
1345
1346 if (!err) {
1347 if (!data->stop || mrq->sbc) {
17c8bc85 1348 if (mrq->sbc && data->stop)
e352c813
SJ
1349 data->stop->error = 0;
1350 dw_mci_request_end(host, mrq);
1351 goto unlock;
f95f3850 1352 }
f95f3850 1353
e352c813
SJ
1354 /* stop command for open-ended transfer*/
1355 if (data->stop)
1356 send_stop_abort(host, data);
053b3ce6
SJ
1357 }
1358
e352c813
SJ
1359 /*
1360 * If err has non-zero,
1361 * stop-abort command has been already issued.
1362 */
f95f3850 1363 prev_state = state = STATE_SENDING_STOP;
e352c813 1364
f95f3850
WN
1365 /* fall through */
1366
1367 case STATE_SENDING_STOP:
1368 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1369 &host->pending_events))
1370 break;
1371
71abb133 1372 /* CMD error in data command */
31bff450
SJ
1373 if (mrq->cmd->error && mrq->data)
1374 dw_mci_fifo_reset(host);
71abb133 1375
f95f3850 1376 host->cmd = NULL;
71abb133 1377 host->data = NULL;
90c2143a 1378
e352c813
SJ
1379 if (mrq->stop)
1380 dw_mci_command_complete(host, mrq->stop);
90c2143a
SJ
1381 else
1382 host->cmd_status = 0;
1383
e352c813 1384 dw_mci_request_end(host, mrq);
f95f3850
WN
1385 goto unlock;
1386
1387 case STATE_DATA_ERROR:
1388 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1389 &host->pending_events))
1390 break;
1391
1392 state = STATE_DATA_BUSY;
1393 break;
1394 }
1395 } while (state != prev_state);
1396
1397 host->state = state;
1398unlock:
1399 spin_unlock(&host->lock);
1400
1401}
1402
34b664a2
JH
1403/* push final bytes to part_buf, only use during push */
1404static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 1405{
34b664a2
JH
1406 memcpy((void *)&host->part_buf, buf, cnt);
1407 host->part_buf_count = cnt;
1408}
f95f3850 1409
34b664a2
JH
1410/* append bytes to part_buf, only use during push */
1411static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1412{
1413 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1414 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1415 host->part_buf_count += cnt;
1416 return cnt;
1417}
f95f3850 1418
34b664a2
JH
1419/* pull first bytes from part_buf, only use during pull */
1420static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1421{
1422 cnt = min(cnt, (int)host->part_buf_count);
1423 if (cnt) {
1424 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1425 cnt);
1426 host->part_buf_count -= cnt;
1427 host->part_buf_start += cnt;
f95f3850 1428 }
34b664a2 1429 return cnt;
f95f3850
WN
1430}
1431
34b664a2
JH
1432/* pull final bytes from the part_buf, assuming it's just been filled */
1433static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 1434{
34b664a2
JH
1435 memcpy(buf, &host->part_buf, cnt);
1436 host->part_buf_start = cnt;
1437 host->part_buf_count = (1 << host->data_shift) - cnt;
1438}
f95f3850 1439
34b664a2
JH
1440static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1441{
cfbeb59c
MC
1442 struct mmc_data *data = host->data;
1443 int init_cnt = cnt;
1444
34b664a2
JH
1445 /* try and push anything in the part_buf */
1446 if (unlikely(host->part_buf_count)) {
1447 int len = dw_mci_push_part_bytes(host, buf, cnt);
1448 buf += len;
1449 cnt -= len;
cfbeb59c 1450 if (host->part_buf_count == 2) {
4e0a5adf
JC
1451 mci_writew(host, DATA(host->data_offset),
1452 host->part_buf16);
34b664a2
JH
1453 host->part_buf_count = 0;
1454 }
1455 }
1456#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1457 if (unlikely((unsigned long)buf & 0x1)) {
1458 while (cnt >= 2) {
1459 u16 aligned_buf[64];
1460 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1461 int items = len >> 1;
1462 int i;
1463 /* memcpy from input buffer into aligned buffer */
1464 memcpy(aligned_buf, buf, len);
1465 buf += len;
1466 cnt -= len;
1467 /* push data from aligned buffer into fifo */
1468 for (i = 0; i < items; ++i)
4e0a5adf
JC
1469 mci_writew(host, DATA(host->data_offset),
1470 aligned_buf[i]);
34b664a2
JH
1471 }
1472 } else
1473#endif
1474 {
1475 u16 *pdata = buf;
1476 for (; cnt >= 2; cnt -= 2)
4e0a5adf 1477 mci_writew(host, DATA(host->data_offset), *pdata++);
34b664a2
JH
1478 buf = pdata;
1479 }
1480 /* put anything remaining in the part_buf */
1481 if (cnt) {
1482 dw_mci_set_part_bytes(host, buf, cnt);
cfbeb59c
MC
1483 /* Push data if we have reached the expected data length */
1484 if ((data->bytes_xfered + init_cnt) ==
1485 (data->blksz * data->blocks))
4e0a5adf 1486 mci_writew(host, DATA(host->data_offset),
cfbeb59c 1487 host->part_buf16);
34b664a2
JH
1488 }
1489}
f95f3850 1490
34b664a2
JH
1491static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1492{
1493#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1494 if (unlikely((unsigned long)buf & 0x1)) {
1495 while (cnt >= 2) {
1496 /* pull data from fifo into aligned buffer */
1497 u16 aligned_buf[64];
1498 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1499 int items = len >> 1;
1500 int i;
1501 for (i = 0; i < items; ++i)
4e0a5adf
JC
1502 aligned_buf[i] = mci_readw(host,
1503 DATA(host->data_offset));
34b664a2
JH
1504 /* memcpy from aligned buffer into output buffer */
1505 memcpy(buf, aligned_buf, len);
1506 buf += len;
1507 cnt -= len;
1508 }
1509 } else
1510#endif
1511 {
1512 u16 *pdata = buf;
1513 for (; cnt >= 2; cnt -= 2)
4e0a5adf 1514 *pdata++ = mci_readw(host, DATA(host->data_offset));
34b664a2
JH
1515 buf = pdata;
1516 }
1517 if (cnt) {
4e0a5adf 1518 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
34b664a2 1519 dw_mci_pull_final_bytes(host, buf, cnt);
f95f3850
WN
1520 }
1521}
1522
1523static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1524{
cfbeb59c
MC
1525 struct mmc_data *data = host->data;
1526 int init_cnt = cnt;
1527
34b664a2
JH
1528 /* try and push anything in the part_buf */
1529 if (unlikely(host->part_buf_count)) {
1530 int len = dw_mci_push_part_bytes(host, buf, cnt);
1531 buf += len;
1532 cnt -= len;
cfbeb59c 1533 if (host->part_buf_count == 4) {
4e0a5adf
JC
1534 mci_writel(host, DATA(host->data_offset),
1535 host->part_buf32);
34b664a2
JH
1536 host->part_buf_count = 0;
1537 }
1538 }
1539#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1540 if (unlikely((unsigned long)buf & 0x3)) {
1541 while (cnt >= 4) {
1542 u32 aligned_buf[32];
1543 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1544 int items = len >> 2;
1545 int i;
1546 /* memcpy from input buffer into aligned buffer */
1547 memcpy(aligned_buf, buf, len);
1548 buf += len;
1549 cnt -= len;
1550 /* push data from aligned buffer into fifo */
1551 for (i = 0; i < items; ++i)
4e0a5adf
JC
1552 mci_writel(host, DATA(host->data_offset),
1553 aligned_buf[i]);
34b664a2
JH
1554 }
1555 } else
1556#endif
1557 {
1558 u32 *pdata = buf;
1559 for (; cnt >= 4; cnt -= 4)
4e0a5adf 1560 mci_writel(host, DATA(host->data_offset), *pdata++);
34b664a2
JH
1561 buf = pdata;
1562 }
1563 /* put anything remaining in the part_buf */
1564 if (cnt) {
1565 dw_mci_set_part_bytes(host, buf, cnt);
cfbeb59c
MC
1566 /* Push data if we have reached the expected data length */
1567 if ((data->bytes_xfered + init_cnt) ==
1568 (data->blksz * data->blocks))
4e0a5adf 1569 mci_writel(host, DATA(host->data_offset),
cfbeb59c 1570 host->part_buf32);
f95f3850
WN
1571 }
1572}
1573
1574static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1575{
34b664a2
JH
1576#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1577 if (unlikely((unsigned long)buf & 0x3)) {
1578 while (cnt >= 4) {
1579 /* pull data from fifo into aligned buffer */
1580 u32 aligned_buf[32];
1581 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1582 int items = len >> 2;
1583 int i;
1584 for (i = 0; i < items; ++i)
4e0a5adf
JC
1585 aligned_buf[i] = mci_readl(host,
1586 DATA(host->data_offset));
34b664a2
JH
1587 /* memcpy from aligned buffer into output buffer */
1588 memcpy(buf, aligned_buf, len);
1589 buf += len;
1590 cnt -= len;
1591 }
1592 } else
1593#endif
1594 {
1595 u32 *pdata = buf;
1596 for (; cnt >= 4; cnt -= 4)
4e0a5adf 1597 *pdata++ = mci_readl(host, DATA(host->data_offset));
34b664a2
JH
1598 buf = pdata;
1599 }
1600 if (cnt) {
4e0a5adf 1601 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
34b664a2 1602 dw_mci_pull_final_bytes(host, buf, cnt);
f95f3850
WN
1603 }
1604}
1605
1606static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1607{
cfbeb59c
MC
1608 struct mmc_data *data = host->data;
1609 int init_cnt = cnt;
1610
34b664a2
JH
1611 /* try and push anything in the part_buf */
1612 if (unlikely(host->part_buf_count)) {
1613 int len = dw_mci_push_part_bytes(host, buf, cnt);
1614 buf += len;
1615 cnt -= len;
c09fbd74 1616
cfbeb59c 1617 if (host->part_buf_count == 8) {
c09fbd74 1618 mci_writeq(host, DATA(host->data_offset),
4e0a5adf 1619 host->part_buf);
34b664a2
JH
1620 host->part_buf_count = 0;
1621 }
1622 }
1623#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1624 if (unlikely((unsigned long)buf & 0x7)) {
1625 while (cnt >= 8) {
1626 u64 aligned_buf[16];
1627 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1628 int items = len >> 3;
1629 int i;
1630 /* memcpy from input buffer into aligned buffer */
1631 memcpy(aligned_buf, buf, len);
1632 buf += len;
1633 cnt -= len;
1634 /* push data from aligned buffer into fifo */
1635 for (i = 0; i < items; ++i)
4e0a5adf
JC
1636 mci_writeq(host, DATA(host->data_offset),
1637 aligned_buf[i]);
34b664a2
JH
1638 }
1639 } else
1640#endif
1641 {
1642 u64 *pdata = buf;
1643 for (; cnt >= 8; cnt -= 8)
4e0a5adf 1644 mci_writeq(host, DATA(host->data_offset), *pdata++);
34b664a2
JH
1645 buf = pdata;
1646 }
1647 /* put anything remaining in the part_buf */
1648 if (cnt) {
1649 dw_mci_set_part_bytes(host, buf, cnt);
cfbeb59c
MC
1650 /* Push data if we have reached the expected data length */
1651 if ((data->bytes_xfered + init_cnt) ==
1652 (data->blksz * data->blocks))
4e0a5adf 1653 mci_writeq(host, DATA(host->data_offset),
cfbeb59c 1654 host->part_buf);
f95f3850
WN
1655 }
1656}
1657
1658static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1659{
34b664a2
JH
1660#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1661 if (unlikely((unsigned long)buf & 0x7)) {
1662 while (cnt >= 8) {
1663 /* pull data from fifo into aligned buffer */
1664 u64 aligned_buf[16];
1665 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1666 int items = len >> 3;
1667 int i;
1668 for (i = 0; i < items; ++i)
4e0a5adf
JC
1669 aligned_buf[i] = mci_readq(host,
1670 DATA(host->data_offset));
34b664a2
JH
1671 /* memcpy from aligned buffer into output buffer */
1672 memcpy(buf, aligned_buf, len);
1673 buf += len;
1674 cnt -= len;
1675 }
1676 } else
1677#endif
1678 {
1679 u64 *pdata = buf;
1680 for (; cnt >= 8; cnt -= 8)
4e0a5adf 1681 *pdata++ = mci_readq(host, DATA(host->data_offset));
34b664a2
JH
1682 buf = pdata;
1683 }
1684 if (cnt) {
4e0a5adf 1685 host->part_buf = mci_readq(host, DATA(host->data_offset));
34b664a2
JH
1686 dw_mci_pull_final_bytes(host, buf, cnt);
1687 }
1688}
f95f3850 1689
34b664a2
JH
1690static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1691{
1692 int len;
f95f3850 1693
34b664a2
JH
1694 /* get remaining partial bytes */
1695 len = dw_mci_pull_part_bytes(host, buf, cnt);
1696 if (unlikely(len == cnt))
1697 return;
1698 buf += len;
1699 cnt -= len;
1700
1701 /* get the rest of the data */
1702 host->pull_data(host, buf, cnt);
f95f3850
WN
1703}
1704
87a74d39 1705static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
f95f3850 1706{
f9c2a0dc
SJ
1707 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1708 void *buf;
1709 unsigned int offset;
f95f3850
WN
1710 struct mmc_data *data = host->data;
1711 int shift = host->data_shift;
1712 u32 status;
3e4b0d8b 1713 unsigned int len;
f9c2a0dc 1714 unsigned int remain, fcnt;
f95f3850
WN
1715
1716 do {
f9c2a0dc
SJ
1717 if (!sg_miter_next(sg_miter))
1718 goto done;
1719
4225fc85 1720 host->sg = sg_miter->piter.sg;
f9c2a0dc
SJ
1721 buf = sg_miter->addr;
1722 remain = sg_miter->length;
1723 offset = 0;
1724
1725 do {
1726 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1727 << shift) + host->part_buf_count;
1728 len = min(remain, fcnt);
1729 if (!len)
1730 break;
34b664a2 1731 dw_mci_pull_data(host, (void *)(buf + offset), len);
3e4b0d8b 1732 data->bytes_xfered += len;
f95f3850 1733 offset += len;
f9c2a0dc
SJ
1734 remain -= len;
1735 } while (remain);
f95f3850 1736
e74f3a9c 1737 sg_miter->consumed = offset;
f95f3850
WN
1738 status = mci_readl(host, MINTSTS);
1739 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
87a74d39
KK
1740 /* if the RXDR is ready read again */
1741 } while ((status & SDMMC_INT_RXDR) ||
1742 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
f9c2a0dc
SJ
1743
1744 if (!remain) {
1745 if (!sg_miter_next(sg_miter))
1746 goto done;
1747 sg_miter->consumed = 0;
1748 }
1749 sg_miter_stop(sg_miter);
f95f3850
WN
1750 return;
1751
1752done:
f9c2a0dc
SJ
1753 sg_miter_stop(sg_miter);
1754 host->sg = NULL;
f95f3850
WN
1755 smp_wmb();
1756 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1757}
1758
1759static void dw_mci_write_data_pio(struct dw_mci *host)
1760{
f9c2a0dc
SJ
1761 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1762 void *buf;
1763 unsigned int offset;
f95f3850
WN
1764 struct mmc_data *data = host->data;
1765 int shift = host->data_shift;
1766 u32 status;
3e4b0d8b 1767 unsigned int len;
f9c2a0dc
SJ
1768 unsigned int fifo_depth = host->fifo_depth;
1769 unsigned int remain, fcnt;
f95f3850
WN
1770
1771 do {
f9c2a0dc
SJ
1772 if (!sg_miter_next(sg_miter))
1773 goto done;
1774
4225fc85 1775 host->sg = sg_miter->piter.sg;
f9c2a0dc
SJ
1776 buf = sg_miter->addr;
1777 remain = sg_miter->length;
1778 offset = 0;
1779
1780 do {
1781 fcnt = ((fifo_depth -
1782 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1783 << shift) - host->part_buf_count;
1784 len = min(remain, fcnt);
1785 if (!len)
1786 break;
f95f3850 1787 host->push_data(host, (void *)(buf + offset), len);
3e4b0d8b 1788 data->bytes_xfered += len;
f95f3850 1789 offset += len;
f9c2a0dc
SJ
1790 remain -= len;
1791 } while (remain);
f95f3850 1792
e74f3a9c 1793 sg_miter->consumed = offset;
f95f3850
WN
1794 status = mci_readl(host, MINTSTS);
1795 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
f95f3850 1796 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
f9c2a0dc
SJ
1797
1798 if (!remain) {
1799 if (!sg_miter_next(sg_miter))
1800 goto done;
1801 sg_miter->consumed = 0;
1802 }
1803 sg_miter_stop(sg_miter);
f95f3850
WN
1804 return;
1805
1806done:
f9c2a0dc
SJ
1807 sg_miter_stop(sg_miter);
1808 host->sg = NULL;
f95f3850
WN
1809 smp_wmb();
1810 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1811}
1812
1813static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1814{
1815 if (!host->cmd_status)
1816 host->cmd_status = status;
1817
1818 smp_wmb();
1819
1820 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1821 tasklet_schedule(&host->tasklet);
1822}
1823
1824static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1825{
1826 struct dw_mci *host = dev_id;
182c9081 1827 u32 pending;
1a5c8e1f 1828 int i;
f95f3850 1829
1fb5f68a
MC
1830 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1831
476d79f1
DA
1832 /*
1833 * DTO fix - version 2.10a and below, and only if internal DMA
1834 * is configured.
1835 */
1836 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1837 if (!pending &&
1838 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1839 pending |= SDMMC_INT_DATA_OVER;
1840 }
f95f3850 1841
476d79f1 1842 if (pending) {
f95f3850
WN
1843 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1844 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
182c9081 1845 host->cmd_status = pending;
f95f3850
WN
1846 smp_wmb();
1847 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
f95f3850
WN
1848 }
1849
1850 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1851 /* if there is an error report DATA_ERROR */
1852 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
182c9081 1853 host->data_status = pending;
f95f3850
WN
1854 smp_wmb();
1855 set_bit(EVENT_DATA_ERROR, &host->pending_events);
9b2026a1 1856 tasklet_schedule(&host->tasklet);
f95f3850
WN
1857 }
1858
1859 if (pending & SDMMC_INT_DATA_OVER) {
1860 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1861 if (!host->data_status)
182c9081 1862 host->data_status = pending;
f95f3850
WN
1863 smp_wmb();
1864 if (host->dir_status == DW_MCI_RECV_STATUS) {
1865 if (host->sg != NULL)
87a74d39 1866 dw_mci_read_data_pio(host, true);
f95f3850
WN
1867 }
1868 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1869 tasklet_schedule(&host->tasklet);
1870 }
1871
1872 if (pending & SDMMC_INT_RXDR) {
1873 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
b40af3aa 1874 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
87a74d39 1875 dw_mci_read_data_pio(host, false);
f95f3850
WN
1876 }
1877
1878 if (pending & SDMMC_INT_TXDR) {
1879 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
b40af3aa 1880 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
f95f3850
WN
1881 dw_mci_write_data_pio(host);
1882 }
1883
1884 if (pending & SDMMC_INT_CMD_DONE) {
1885 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
182c9081 1886 dw_mci_cmd_interrupt(host, pending);
f95f3850
WN
1887 }
1888
1889 if (pending & SDMMC_INT_CD) {
1890 mci_writel(host, RINTSTS, SDMMC_INT_CD);
95dcc2cb 1891 queue_work(host->card_workqueue, &host->card_work);
f95f3850
WN
1892 }
1893
1a5c8e1f
SH
1894 /* Handle SDIO Interrupts */
1895 for (i = 0; i < host->num_slots; i++) {
1896 struct dw_mci_slot *slot = host->slot[i];
1897 if (pending & SDMMC_INT_SDIO(i)) {
1898 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1899 mmc_signal_sdio_irq(slot->mmc);
1900 }
1901 }
1902
1fb5f68a 1903 }
f95f3850
WN
1904
1905#ifdef CONFIG_MMC_DW_IDMAC
1906 /* Handle DMA interrupts */
1907 pending = mci_readl(host, IDSTS);
1908 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1909 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1910 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
f95f3850
WN
1911 host->dma_ops->complete(host);
1912 }
1913#endif
1914
1915 return IRQ_HANDLED;
1916}
1917
1791b13e 1918static void dw_mci_work_routine_card(struct work_struct *work)
f95f3850 1919{
1791b13e 1920 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
f95f3850
WN
1921 int i;
1922
1923 for (i = 0; i < host->num_slots; i++) {
1924 struct dw_mci_slot *slot = host->slot[i];
1925 struct mmc_host *mmc = slot->mmc;
1926 struct mmc_request *mrq;
1927 int present;
f95f3850
WN
1928
1929 present = dw_mci_get_cd(mmc);
1930 while (present != slot->last_detect_state) {
f95f3850
WN
1931 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1932 present ? "inserted" : "removed");
1933
1791b13e
JH
1934 spin_lock_bh(&host->lock);
1935
f95f3850
WN
1936 /* Card change detected */
1937 slot->last_detect_state = present;
1938
f95f3850
WN
1939 /* Clean up queue if present */
1940 mrq = slot->mrq;
1941 if (mrq) {
1942 if (mrq == host->mrq) {
1943 host->data = NULL;
1944 host->cmd = NULL;
1945
1946 switch (host->state) {
1947 case STATE_IDLE:
1948 break;
1949 case STATE_SENDING_CMD:
1950 mrq->cmd->error = -ENOMEDIUM;
1951 if (!mrq->data)
1952 break;
1953 /* fall through */
1954 case STATE_SENDING_DATA:
1955 mrq->data->error = -ENOMEDIUM;
1956 dw_mci_stop_dma(host);
1957 break;
1958 case STATE_DATA_BUSY:
1959 case STATE_DATA_ERROR:
1960 if (mrq->data->error == -EINPROGRESS)
1961 mrq->data->error = -ENOMEDIUM;
f95f3850
WN
1962 /* fall through */
1963 case STATE_SENDING_STOP:
90c2143a
SJ
1964 if (mrq->stop)
1965 mrq->stop->error = -ENOMEDIUM;
f95f3850
WN
1966 break;
1967 }
1968
1969 dw_mci_request_end(host, mrq);
1970 } else {
1971 list_del(&slot->queue_node);
1972 mrq->cmd->error = -ENOMEDIUM;
1973 if (mrq->data)
1974 mrq->data->error = -ENOMEDIUM;
1975 if (mrq->stop)
1976 mrq->stop->error = -ENOMEDIUM;
1977
1978 spin_unlock(&host->lock);
1979 mmc_request_done(slot->mmc, mrq);
1980 spin_lock(&host->lock);
1981 }
1982 }
1983
1984 /* Power down slot */
1985 if (present == 0) {
31bff450
SJ
1986 /* Clear down the FIFO */
1987 dw_mci_fifo_reset(host);
f95f3850 1988#ifdef CONFIG_MMC_DW_IDMAC
5ce9d961 1989 dw_mci_idmac_reset(host);
f95f3850
WN
1990#endif
1991
1992 }
1993
1791b13e
JH
1994 spin_unlock_bh(&host->lock);
1995
f95f3850
WN
1996 present = dw_mci_get_cd(mmc);
1997 }
1998
1999 mmc_detect_change(slot->mmc,
2000 msecs_to_jiffies(host->pdata->detect_delay_ms));
2001 }
2002}
2003
c91eab4b
TA
2004#ifdef CONFIG_OF
2005/* given a slot id, find out the device node representing that slot */
2006static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2007{
2008 struct device_node *np;
2009 const __be32 *addr;
2010 int len;
2011
2012 if (!dev || !dev->of_node)
2013 return NULL;
2014
2015 for_each_child_of_node(dev->of_node, np) {
2016 addr = of_get_property(np, "reg", &len);
2017 if (!addr || (len < sizeof(int)))
2018 continue;
2019 if (be32_to_cpup(addr) == slot)
2020 return np;
2021 }
2022 return NULL;
2023}
2024
a70aaa64
DA
2025static struct dw_mci_of_slot_quirks {
2026 char *quirk;
2027 int id;
2028} of_slot_quirks[] = {
2029 {
2030 .quirk = "disable-wp",
2031 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2032 },
2033};
2034
2035static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2036{
2037 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2038 int quirks = 0;
2039 int idx;
2040
2041 /* get quirks */
2042 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2043 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2044 quirks |= of_slot_quirks[idx].id;
2045
2046 return quirks;
2047}
2048
55a6ceb2
DA
2049/* find the write protect gpio for a given slot; or -1 if none specified */
2050static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2051{
2052 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2053 int gpio;
2054
2055 if (!np)
2056 return -EINVAL;
2057
2058 gpio = of_get_named_gpio(np, "wp-gpios", 0);
2059
2060 /* Having a missing entry is valid; return silently */
2061 if (!gpio_is_valid(gpio))
2062 return -EINVAL;
2063
2064 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2065 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2066 return -EINVAL;
2067 }
2068
2069 return gpio;
2070}
bf626e55 2071
7cf347bd 2072/* find the cd gpio for a given slot */
bf626e55
ZG
2073static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2074 struct mmc_host *mmc)
2075{
2076 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2077 int gpio;
2078
2079 if (!np)
2080 return;
2081
2082 gpio = of_get_named_gpio(np, "cd-gpios", 0);
2083
2084 /* Having a missing entry is valid; return silently */
2085 if (!gpio_is_valid(gpio))
2086 return;
2087
2088 if (mmc_gpio_request_cd(mmc, gpio, 0))
2089 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2090}
c91eab4b 2091#else /* CONFIG_OF */
a70aaa64
DA
2092static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2093{
2094 return 0;
2095}
c91eab4b
TA
2096static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2097{
2098 return NULL;
2099}
55a6ceb2
DA
2100static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2101{
2102 return -EINVAL;
2103}
bf626e55
ZG
2104static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2105 struct mmc_host *mmc)
2106{
2107 return;
2108}
c91eab4b
TA
2109#endif /* CONFIG_OF */
2110
36c179a9 2111static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
f95f3850
WN
2112{
2113 struct mmc_host *mmc;
2114 struct dw_mci_slot *slot;
e95baf13 2115 const struct dw_mci_drv_data *drv_data = host->drv_data;
800d78bf 2116 int ctrl_id, ret;
1f44a2a5 2117 u32 freq[2];
f95f3850 2118
4a90920c 2119 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
f95f3850
WN
2120 if (!mmc)
2121 return -ENOMEM;
2122
2123 slot = mmc_priv(mmc);
2124 slot->id = id;
2125 slot->mmc = mmc;
2126 slot->host = host;
c91eab4b 2127 host->slot[id] = slot;
f95f3850 2128
a70aaa64
DA
2129 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2130
f95f3850 2131 mmc->ops = &dw_mci_ops;
1f44a2a5
SJ
2132 if (of_property_read_u32_array(host->dev->of_node,
2133 "clock-freq-min-max", freq, 2)) {
2134 mmc->f_min = DW_MCI_FREQ_MIN;
2135 mmc->f_max = DW_MCI_FREQ_MAX;
2136 } else {
2137 mmc->f_min = freq[0];
2138 mmc->f_max = freq[1];
2139 }
f95f3850
WN
2140
2141 if (host->pdata->get_ocr)
2142 mmc->ocr_avail = host->pdata->get_ocr(id);
2143 else
2144 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2145
2146 /*
2147 * Start with slot power disabled, it will be enabled when a card
2148 * is detected.
2149 */
2150 if (host->pdata->setpower)
2151 host->pdata->setpower(id, 0);
2152
fc3d7720
JC
2153 if (host->pdata->caps)
2154 mmc->caps = host->pdata->caps;
fc3d7720 2155
ab269128
AK
2156 if (host->pdata->pm_caps)
2157 mmc->pm_caps = host->pdata->pm_caps;
2158
800d78bf
TA
2159 if (host->dev->of_node) {
2160 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2161 if (ctrl_id < 0)
2162 ctrl_id = 0;
2163 } else {
2164 ctrl_id = to_platform_device(host->dev)->id;
2165 }
cb27a843
JH
2166 if (drv_data && drv_data->caps)
2167 mmc->caps |= drv_data->caps[ctrl_id];
800d78bf 2168
4f408cc6
SJ
2169 if (host->pdata->caps2)
2170 mmc->caps2 = host->pdata->caps2;
4f408cc6 2171
d8a4fb0e 2172 mmc_of_parse(mmc);
f95f3850 2173
f95f3850
WN
2174 if (host->pdata->blk_settings) {
2175 mmc->max_segs = host->pdata->blk_settings->max_segs;
2176 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2177 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2178 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2179 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2180 } else {
2181 /* Useful defaults if platform data is unset. */
a39e5746
JC
2182#ifdef CONFIG_MMC_DW_IDMAC
2183 mmc->max_segs = host->ring_size;
2184 mmc->max_blk_size = 65536;
2185 mmc->max_blk_count = host->ring_size;
2186 mmc->max_seg_size = 0x1000;
2187 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2188#else
f95f3850
WN
2189 mmc->max_segs = 64;
2190 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2191 mmc->max_blk_count = 512;
2192 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2193 mmc->max_seg_size = mmc->max_req_size;
f95f3850 2194#endif /* CONFIG_MMC_DW_IDMAC */
a39e5746 2195 }
f95f3850 2196
55a6ceb2 2197 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
bf626e55 2198 dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
55a6ceb2 2199
0cea529d
JC
2200 ret = mmc_add_host(mmc);
2201 if (ret)
2202 goto err_setup_bus;
f95f3850
WN
2203
2204#if defined(CONFIG_DEBUG_FS)
2205 dw_mci_init_debugfs(slot);
2206#endif
2207
2208 /* Card initially undetected */
2209 slot->last_detect_state = 0;
2210
2211 return 0;
800d78bf
TA
2212
2213err_setup_bus:
2214 mmc_free_host(mmc);
2215 return -EINVAL;
f95f3850
WN
2216}
2217
2218static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2219{
2220 /* Shutdown detect IRQ */
2221 if (slot->host->pdata->exit)
2222 slot->host->pdata->exit(id);
2223
2224 /* Debugfs stuff is cleaned up by mmc core */
2225 mmc_remove_host(slot->mmc);
2226 slot->host->slot[id] = NULL;
2227 mmc_free_host(slot->mmc);
2228}
2229
2230static void dw_mci_init_dma(struct dw_mci *host)
2231{
2232 /* Alloc memory for sg translation */
780f22af 2233 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
f95f3850
WN
2234 &host->sg_dma, GFP_KERNEL);
2235 if (!host->sg_cpu) {
4a90920c 2236 dev_err(host->dev, "%s: could not alloc DMA memory\n",
f95f3850
WN
2237 __func__);
2238 goto no_dma;
2239 }
2240
2241 /* Determine which DMA interface to use */
2242#ifdef CONFIG_MMC_DW_IDMAC
2243 host->dma_ops = &dw_mci_idmac_ops;
00956ea3 2244 dev_info(host->dev, "Using internal DMA controller.\n");
f95f3850
WN
2245#endif
2246
2247 if (!host->dma_ops)
2248 goto no_dma;
2249
e1631f98
JC
2250 if (host->dma_ops->init && host->dma_ops->start &&
2251 host->dma_ops->stop && host->dma_ops->cleanup) {
f95f3850 2252 if (host->dma_ops->init(host)) {
4a90920c 2253 dev_err(host->dev, "%s: Unable to initialize "
f95f3850
WN
2254 "DMA Controller.\n", __func__);
2255 goto no_dma;
2256 }
2257 } else {
4a90920c 2258 dev_err(host->dev, "DMA initialization not found.\n");
f95f3850
WN
2259 goto no_dma;
2260 }
2261
2262 host->use_dma = 1;
2263 return;
2264
2265no_dma:
4a90920c 2266 dev_info(host->dev, "Using PIO mode.\n");
f95f3850
WN
2267 host->use_dma = 0;
2268 return;
2269}
2270
31bff450 2271static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
f95f3850
WN
2272{
2273 unsigned long timeout = jiffies + msecs_to_jiffies(500);
31bff450 2274 u32 ctrl;
f95f3850 2275
31bff450
SJ
2276 ctrl = mci_readl(host, CTRL);
2277 ctrl |= reset;
2278 mci_writel(host, CTRL, ctrl);
f95f3850
WN
2279
2280 /* wait till resets clear */
2281 do {
2282 ctrl = mci_readl(host, CTRL);
31bff450 2283 if (!(ctrl & reset))
f95f3850
WN
2284 return true;
2285 } while (time_before(jiffies, timeout));
2286
31bff450
SJ
2287 dev_err(host->dev,
2288 "Timeout resetting block (ctrl reset %#x)\n",
2289 ctrl & reset);
f95f3850
WN
2290
2291 return false;
2292}
2293
31bff450
SJ
2294static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2295{
2296 /*
2297 * Reseting generates a block interrupt, hence setting
2298 * the scatter-gather pointer to NULL.
2299 */
2300 if (host->sg) {
2301 sg_miter_stop(&host->sg_miter);
2302 host->sg = NULL;
2303 }
2304
2305 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2306}
2307
2308static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2309{
2310 return dw_mci_ctrl_reset(host,
2311 SDMMC_CTRL_FIFO_RESET |
2312 SDMMC_CTRL_RESET |
2313 SDMMC_CTRL_DMA_RESET);
2314}
2315
c91eab4b
TA
2316#ifdef CONFIG_OF
2317static struct dw_mci_of_quirks {
2318 char *quirk;
2319 int id;
2320} of_quirks[] = {
2321 {
c91eab4b
TA
2322 .quirk = "broken-cd",
2323 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2324 },
2325};
2326
2327static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2328{
2329 struct dw_mci_board *pdata;
2330 struct device *dev = host->dev;
2331 struct device_node *np = dev->of_node;
e95baf13 2332 const struct dw_mci_drv_data *drv_data = host->drv_data;
800d78bf 2333 int idx, ret;
3c6d89ea 2334 u32 clock_frequency;
c91eab4b
TA
2335
2336 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2337 if (!pdata) {
2338 dev_err(dev, "could not allocate memory for pdata\n");
2339 return ERR_PTR(-ENOMEM);
2340 }
2341
2342 /* find out number of slots supported */
2343 if (of_property_read_u32(dev->of_node, "num-slots",
2344 &pdata->num_slots)) {
2345 dev_info(dev, "num-slots property not found, "
2346 "assuming 1 slot is available\n");
2347 pdata->num_slots = 1;
2348 }
2349
2350 /* get quirks */
2351 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2352 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2353 pdata->quirks |= of_quirks[idx].id;
2354
2355 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2356 dev_info(dev, "fifo-depth property not found, using "
2357 "value of FIFOTH register as default\n");
2358
2359 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2360
3c6d89ea
DA
2361 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2362 pdata->bus_hz = clock_frequency;
2363
cb27a843
JH
2364 if (drv_data && drv_data->parse_dt) {
2365 ret = drv_data->parse_dt(host);
800d78bf
TA
2366 if (ret)
2367 return ERR_PTR(ret);
2368 }
2369
10b49841
SJ
2370 if (of_find_property(np, "supports-highspeed", NULL))
2371 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2372
c91eab4b
TA
2373 return pdata;
2374}
2375
2376#else /* CONFIG_OF */
2377static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2378{
2379 return ERR_PTR(-EINVAL);
2380}
2381#endif /* CONFIG_OF */
2382
62ca8034 2383int dw_mci_probe(struct dw_mci *host)
f95f3850 2384{
e95baf13 2385 const struct dw_mci_drv_data *drv_data = host->drv_data;
62ca8034 2386 int width, i, ret = 0;
f95f3850 2387 u32 fifo_size;
1c2215b7 2388 int init_slots = 0;
f95f3850 2389
c91eab4b
TA
2390 if (!host->pdata) {
2391 host->pdata = dw_mci_parse_dt(host);
2392 if (IS_ERR(host->pdata)) {
2393 dev_err(host->dev, "platform data not available\n");
2394 return -EINVAL;
2395 }
f95f3850
WN
2396 }
2397
62ca8034 2398 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
4a90920c 2399 dev_err(host->dev,
f95f3850 2400 "Platform data must supply select_slot function\n");
62ca8034 2401 return -ENODEV;
f95f3850
WN
2402 }
2403
780f22af 2404 host->biu_clk = devm_clk_get(host->dev, "biu");
f90a0612
TA
2405 if (IS_ERR(host->biu_clk)) {
2406 dev_dbg(host->dev, "biu clock not available\n");
2407 } else {
2408 ret = clk_prepare_enable(host->biu_clk);
2409 if (ret) {
2410 dev_err(host->dev, "failed to enable biu clock\n");
f90a0612
TA
2411 return ret;
2412 }
2413 }
2414
780f22af 2415 host->ciu_clk = devm_clk_get(host->dev, "ciu");
f90a0612
TA
2416 if (IS_ERR(host->ciu_clk)) {
2417 dev_dbg(host->dev, "ciu clock not available\n");
3c6d89ea 2418 host->bus_hz = host->pdata->bus_hz;
f90a0612
TA
2419 } else {
2420 ret = clk_prepare_enable(host->ciu_clk);
2421 if (ret) {
2422 dev_err(host->dev, "failed to enable ciu clock\n");
f90a0612
TA
2423 goto err_clk_biu;
2424 }
f90a0612 2425
3c6d89ea
DA
2426 if (host->pdata->bus_hz) {
2427 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2428 if (ret)
2429 dev_warn(host->dev,
612de4c1 2430 "Unable to set bus rate to %uHz\n",
3c6d89ea
DA
2431 host->pdata->bus_hz);
2432 }
f90a0612 2433 host->bus_hz = clk_get_rate(host->ciu_clk);
3c6d89ea 2434 }
f90a0612 2435
612de4c1
JC
2436 if (!host->bus_hz) {
2437 dev_err(host->dev,
2438 "Platform data must supply bus speed\n");
2439 ret = -ENODEV;
2440 goto err_clk_ciu;
2441 }
2442
002f0d5c
YK
2443 if (drv_data && drv_data->init) {
2444 ret = drv_data->init(host);
2445 if (ret) {
2446 dev_err(host->dev,
2447 "implementation specific init failed\n");
2448 goto err_clk_ciu;
2449 }
2450 }
2451
cb27a843
JH
2452 if (drv_data && drv_data->setup_clock) {
2453 ret = drv_data->setup_clock(host);
800d78bf
TA
2454 if (ret) {
2455 dev_err(host->dev,
2456 "implementation specific clock setup failed\n");
2457 goto err_clk_ciu;
2458 }
2459 }
2460
a55d6ff0 2461 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
870556a3
DA
2462 if (IS_ERR(host->vmmc)) {
2463 ret = PTR_ERR(host->vmmc);
2464 if (ret == -EPROBE_DEFER)
2465 goto err_clk_ciu;
2466
2467 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2468 host->vmmc = NULL;
2469 } else {
2470 ret = regulator_enable(host->vmmc);
2471 if (ret) {
2472 if (ret != -EPROBE_DEFER)
2473 dev_err(host->dev,
2474 "regulator_enable fail: %d\n", ret);
2475 goto err_clk_ciu;
2476 }
2477 }
2478
62ca8034 2479 host->quirks = host->pdata->quirks;
f95f3850
WN
2480
2481 spin_lock_init(&host->lock);
2482 INIT_LIST_HEAD(&host->queue);
2483
f95f3850
WN
2484 /*
2485 * Get the host data width - this assumes that HCON has been set with
2486 * the correct values.
2487 */
2488 i = (mci_readl(host, HCON) >> 7) & 0x7;
2489 if (!i) {
2490 host->push_data = dw_mci_push_data16;
2491 host->pull_data = dw_mci_pull_data16;
2492 width = 16;
2493 host->data_shift = 1;
2494 } else if (i == 2) {
2495 host->push_data = dw_mci_push_data64;
2496 host->pull_data = dw_mci_pull_data64;
2497 width = 64;
2498 host->data_shift = 3;
2499 } else {
2500 /* Check for a reserved value, and warn if it is */
2501 WARN((i != 1),
2502 "HCON reports a reserved host data width!\n"
2503 "Defaulting to 32-bit access.\n");
2504 host->push_data = dw_mci_push_data32;
2505 host->pull_data = dw_mci_pull_data32;
2506 width = 32;
2507 host->data_shift = 2;
2508 }
2509
2510 /* Reset all blocks */
31bff450 2511 if (!dw_mci_ctrl_all_reset(host))
141a712a
SJ
2512 return -ENODEV;
2513
2514 host->dma_ops = host->pdata->dma_ops;
2515 dw_mci_init_dma(host);
f95f3850
WN
2516
2517 /* Clear the interrupts for the host controller */
2518 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2519 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2520
2521 /* Put in max timeout */
2522 mci_writel(host, TMOUT, 0xFFFFFFFF);
2523
2524 /*
2525 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2526 * Tx Mark = fifo_size / 2 DMA Size = 8
2527 */
b86d8253
JH
2528 if (!host->pdata->fifo_depth) {
2529 /*
2530 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2531 * have been overwritten by the bootloader, just like we're
2532 * about to do, so if you know the value for your hardware, you
2533 * should put it in the platform data.
2534 */
2535 fifo_size = mci_readl(host, FIFOTH);
8234e869 2536 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
b86d8253
JH
2537 } else {
2538 fifo_size = host->pdata->fifo_depth;
2539 }
2540 host->fifo_depth = fifo_size;
52426899
SJ
2541 host->fifoth_val =
2542 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
e61cf118 2543 mci_writel(host, FIFOTH, host->fifoth_val);
f95f3850
WN
2544
2545 /* disable clock to CIU */
2546 mci_writel(host, CLKENA, 0);
2547 mci_writel(host, CLKSRC, 0);
2548
63008768
JH
2549 /*
2550 * In 2.40a spec, Data offset is changed.
2551 * Need to check the version-id and set data-offset for DATA register.
2552 */
2553 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2554 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2555
2556 if (host->verid < DW_MMC_240A)
2557 host->data_offset = DATA_OFFSET;
2558 else
2559 host->data_offset = DATA_240A_OFFSET;
2560
f95f3850 2561 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
95dcc2cb 2562 host->card_workqueue = alloc_workqueue("dw-mci-card",
59ff3eb6 2563 WQ_MEM_RECLAIM, 1);
ef7aef9a
WY
2564 if (!host->card_workqueue) {
2565 ret = -ENOMEM;
1791b13e 2566 goto err_dmaunmap;
ef7aef9a 2567 }
1791b13e 2568 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
780f22af
SJ
2569 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2570 host->irq_flags, "dw-mci", host);
f95f3850 2571 if (ret)
1791b13e 2572 goto err_workqueue;
f95f3850 2573
f95f3850
WN
2574 if (host->pdata->num_slots)
2575 host->num_slots = host->pdata->num_slots;
2576 else
2577 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2578
2da1d7f2
YC
2579 /*
2580 * Enable interrupts for command done, data over, data empty, card det,
2581 * receive ready and error such as transmit, receive timeout, crc error
2582 */
2583 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2584 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2585 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2586 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2587 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2588
2589 dev_info(host->dev, "DW MMC controller at irq %d, "
2590 "%d bit host data width, "
2591 "%u deep fifo\n",
2592 host->irq, width, fifo_size);
2593
f95f3850
WN
2594 /* We need at least one slot to succeed */
2595 for (i = 0; i < host->num_slots; i++) {
2596 ret = dw_mci_init_slot(host, i);
1c2215b7
TA
2597 if (ret)
2598 dev_dbg(host->dev, "slot %d init failed\n", i);
2599 else
2600 init_slots++;
2601 }
2602
2603 if (init_slots) {
2604 dev_info(host->dev, "%d slots initialized\n", init_slots);
2605 } else {
2606 dev_dbg(host->dev, "attempted to initialize %d slots, "
2607 "but failed on all\n", host->num_slots);
780f22af 2608 goto err_workqueue;
f95f3850
WN
2609 }
2610
f95f3850 2611 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4a90920c 2612 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
f95f3850
WN
2613
2614 return 0;
2615
1791b13e 2616err_workqueue:
95dcc2cb 2617 destroy_workqueue(host->card_workqueue);
1791b13e 2618
f95f3850
WN
2619err_dmaunmap:
2620 if (host->use_dma && host->dma_ops->exit)
2621 host->dma_ops->exit(host);
780f22af 2622 if (host->vmmc)
c07946a3 2623 regulator_disable(host->vmmc);
f90a0612
TA
2624
2625err_clk_ciu:
780f22af 2626 if (!IS_ERR(host->ciu_clk))
f90a0612 2627 clk_disable_unprepare(host->ciu_clk);
780f22af 2628
f90a0612 2629err_clk_biu:
780f22af 2630 if (!IS_ERR(host->biu_clk))
f90a0612 2631 clk_disable_unprepare(host->biu_clk);
780f22af 2632
f95f3850
WN
2633 return ret;
2634}
62ca8034 2635EXPORT_SYMBOL(dw_mci_probe);
f95f3850 2636
62ca8034 2637void dw_mci_remove(struct dw_mci *host)
f95f3850 2638{
f95f3850
WN
2639 int i;
2640
2641 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2642 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2643
f95f3850 2644 for (i = 0; i < host->num_slots; i++) {
4a90920c 2645 dev_dbg(host->dev, "remove slot %d\n", i);
f95f3850
WN
2646 if (host->slot[i])
2647 dw_mci_cleanup_slot(host->slot[i], i);
2648 }
2649
2650 /* disable clock to CIU */
2651 mci_writel(host, CLKENA, 0);
2652 mci_writel(host, CLKSRC, 0);
2653
95dcc2cb 2654 destroy_workqueue(host->card_workqueue);
f95f3850
WN
2655
2656 if (host->use_dma && host->dma_ops->exit)
2657 host->dma_ops->exit(host);
2658
780f22af 2659 if (host->vmmc)
c07946a3 2660 regulator_disable(host->vmmc);
c07946a3 2661
f90a0612
TA
2662 if (!IS_ERR(host->ciu_clk))
2663 clk_disable_unprepare(host->ciu_clk);
780f22af 2664
f90a0612
TA
2665 if (!IS_ERR(host->biu_clk))
2666 clk_disable_unprepare(host->biu_clk);
f95f3850 2667}
62ca8034
SH
2668EXPORT_SYMBOL(dw_mci_remove);
2669
2670
f95f3850 2671
6fe8890d 2672#ifdef CONFIG_PM_SLEEP
f95f3850
WN
2673/*
2674 * TODO: we should probably disable the clock to the card in the suspend path.
2675 */
62ca8034 2676int dw_mci_suspend(struct dw_mci *host)
f95f3850 2677{
c07946a3
JC
2678 if (host->vmmc)
2679 regulator_disable(host->vmmc);
2680
f95f3850
WN
2681 return 0;
2682}
62ca8034 2683EXPORT_SYMBOL(dw_mci_suspend);
f95f3850 2684
62ca8034 2685int dw_mci_resume(struct dw_mci *host)
f95f3850
WN
2686{
2687 int i, ret;
f95f3850 2688
f2f942ce
SK
2689 if (host->vmmc) {
2690 ret = regulator_enable(host->vmmc);
2691 if (ret) {
2692 dev_err(host->dev,
2693 "failed to enable regulator: %d\n", ret);
2694 return ret;
2695 }
2696 }
1d6c4e0a 2697
31bff450 2698 if (!dw_mci_ctrl_all_reset(host)) {
e61cf118
JC
2699 ret = -ENODEV;
2700 return ret;
2701 }
2702
3bfe619d 2703 if (host->use_dma && host->dma_ops->init)
141a712a
SJ
2704 host->dma_ops->init(host);
2705
52426899
SJ
2706 /*
2707 * Restore the initial value at FIFOTH register
2708 * And Invalidate the prev_blksz with zero
2709 */
e61cf118 2710 mci_writel(host, FIFOTH, host->fifoth_val);
52426899 2711 host->prev_blksz = 0;
e61cf118 2712
2eb2944f
DA
2713 /* Put in max timeout */
2714 mci_writel(host, TMOUT, 0xFFFFFFFF);
2715
e61cf118
JC
2716 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2717 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2718 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2719 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2720 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2721
f95f3850
WN
2722 for (i = 0; i < host->num_slots; i++) {
2723 struct dw_mci_slot *slot = host->slot[i];
2724 if (!slot)
2725 continue;
ab269128
AK
2726 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2727 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2728 dw_mci_setup_bus(slot, true);
2729 }
f95f3850 2730 }
f95f3850
WN
2731 return 0;
2732}
62ca8034 2733EXPORT_SYMBOL(dw_mci_resume);
6fe8890d
JC
2734#endif /* CONFIG_PM_SLEEP */
2735
f95f3850
WN
2736static int __init dw_mci_init(void)
2737{
8e1c4e4d 2738 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
62ca8034 2739 return 0;
f95f3850
WN
2740}
2741
2742static void __exit dw_mci_exit(void)
2743{
f95f3850
WN
2744}
2745
2746module_init(dw_mci_init);
2747module_exit(dw_mci_exit);
2748
2749MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2750MODULE_AUTHOR("NXP Semiconductor VietNam");
2751MODULE_AUTHOR("Imagination Technologies Ltd");
2752MODULE_LICENSE("GPL v2");