]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/mmc/host/sdhci.c
mmc: core: add mmc_get_dma_dir
[mirror_ubuntu-artful-kernel.git] / drivers / mmc / host / sdhci.c
CommitLineData
d129bceb 1/*
70f10482 2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
d129bceb 3 *
b69c9058 4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
d129bceb
PO
5 *
6 * This program is free software; you can redistribute it and/or modify
643f720c
PO
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
84c46a53
PO
10 *
11 * Thanks to the following companies for their support:
12 *
13 * - JMicron (hardware and technical support)
d129bceb
PO
14 */
15
d129bceb 16#include <linux/delay.h>
5a436cc0 17#include <linux/ktime.h>
d129bceb 18#include <linux/highmem.h>
b8c86fc5 19#include <linux/io.h>
88b47679 20#include <linux/module.h>
d129bceb 21#include <linux/dma-mapping.h>
5a0e3ad6 22#include <linux/slab.h>
11763609 23#include <linux/scatterlist.h>
9bea3c85 24#include <linux/regulator/consumer.h>
66fd8ad5 25#include <linux/pm_runtime.h>
92e0c44b 26#include <linux/of.h>
d129bceb 27
2f730fec
PO
28#include <linux/leds.h>
29
22113efd 30#include <linux/mmc/mmc.h>
d129bceb 31#include <linux/mmc/host.h>
473b095a 32#include <linux/mmc/card.h>
85cc1c33 33#include <linux/mmc/sdio.h>
bec9d4e5 34#include <linux/mmc/slot-gpio.h>
d129bceb 35
d129bceb
PO
36#include "sdhci.h"
37
38#define DRIVER_NAME "sdhci"
d129bceb 39
d129bceb 40#define DBG(f, x...) \
f421865d 41 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
d129bceb 42
85ad90e2
AH
43#define SDHCI_DUMP(f, x...) \
44 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
45
b513ea25
AN
46#define MAX_TUNING_LOOP 40
47
df673b22 48static unsigned int debug_quirks = 0;
66fd8ad5 49static unsigned int debug_quirks2;
67435274 50
d129bceb
PO
51static void sdhci_finish_data(struct sdhci_host *);
52
52983382 53static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
d129bceb 54
/*
 * sdhci_dumpregs() - dump the controller's register file to the kernel log.
 *
 * Prints every standard SDHCI register (plus the ADMA error/pointer
 * registers when ADMA is in use) via SDHCI_DUMP(), which logs at KERN_ERR.
 * Called from error and timeout paths so the raw hardware state ends up in
 * bug reports.
 */
void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_ACMD12_ERR),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	/* The four 32-bit response words live at SDHCI_RESPONSE + 0/4/8/12 */
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			/* 64-bit ADMA: pointer split across HI/LO registers */
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);
d129bceb
PO
117
118/*****************************************************************************\
119 * *
120 * Low level functions *
121 * *
122\*****************************************************************************/
123
56a590dc
AH
124static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
125{
126 return cmd->data || cmd->flags & MMC_RSP_BUSY;
127}
128
7260cf5e
AV
/*
 * Enable or disable the card insert/remove interrupts.
 *
 * When enabling, arm only the interrupt for the *opposite* of the current
 * card state (a present card can only be removed; an absent slot can only
 * get an insertion), so we never take a spurious event for the state we
 * are already in.  Skipped entirely for hosts with broken card detection
 * or non-removable cards.
 */
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	/* Mirror the cached mask into both enable and signal registers */
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
150
/* Arm the card insert/remove interrupt appropriate for the current state. */
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

/* Mask both card insert and remove interrupts. */
static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
160
02d0b685
UH
161static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
162{
163 if (host->bus_on)
164 return;
165 host->bus_on = true;
166 pm_runtime_get_noresume(host->mmc->parent);
167}
168
169static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
170{
171 if (!host->bus_on)
172 return;
173 host->bus_on = false;
174 pm_runtime_put_noidle(host->mmc->parent);
175}
176
/*
 * Issue a software reset (any combination of SDHCI_RESET_CMD/DATA/ALL)
 * and poll until the hardware clears the reset bit, for at most 100 ms.
 * On timeout the register file is dumped and the function returns without
 * further error reporting.
 */
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		/* clock must be reprogrammed from scratch after a full reset */
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
205
/*
 * Reset via the host's ->reset() op, honouring quirks and restoring state
 * that a full reset destroys (DMA enable, preset-value cache).
 */
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		/* these hosts hang on reset when no card is present */
		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}
227
/*
 * Program the baseline interrupt mask: command/data completion plus all
 * error conditions, and the re-tuning event when the host uses tuning
 * mode 2 or 3.  The cached mask in host->ier is written to both the
 * status-enable and signal-enable registers.
 */
static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
243
/*
 * (Re-)initialise the controller.
 *
 * @soft: non-zero for a soft init (reset only CMD and DATA circuits and
 *        re-apply the current ios), zero for a full RESET_ALL.
 */
static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}
d129bceb 263
7260cf5e
AV
/* Hard re-initialise the controller and re-arm card-detect interrupts. */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
269
061d17a6 270static void __sdhci_led_activate(struct sdhci_host *host)
d129bceb
PO
271{
272 u8 ctrl;
273
4e4141a5 274 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
d129bceb 275 ctrl |= SDHCI_CTRL_LED;
4e4141a5 276 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
d129bceb
PO
277}
278
061d17a6 279static void __sdhci_led_deactivate(struct sdhci_host *host)
d129bceb
PO
280{
281 u8 ctrl;
282
4e4141a5 283 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
d129bceb 284 ctrl &= ~SDHCI_CTRL_LED;
4e4141a5 285 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
d129bceb
PO
286}
287
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
/*
 * LED-class is available: register a classdev whose default trigger is the
 * mmc host, and let the trigger drive the LED.  sdhci_led_activate()/
 * _deactivate() become no-ops in this configuration.
 */
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	/* Don't touch registers while the host is runtime suspended */
	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

/* The LED trigger handles activity; nothing to do in the request path. */
static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

/*
 * No LED class: the request path drives the LED bit directly and there is
 * no classdev to (un)register.
 */
static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif
358
d129bceb
PO
359/*****************************************************************************\
360 * *
361 * Core functions *
362 * *
363\*****************************************************************************/
364
/*
 * Read one block from the PIO buffer register into the request's
 * scatterlist, via the pre-started sg_miter.  The 32-bit BUFFER register
 * is unpacked a byte at a time ('chunk' counts bytes remaining in
 * 'scratch') so arbitrary sg lengths and alignments work.  Runs with
 * local interrupts disabled because the miter mapping is atomic.
 */
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				/* refill scratch with the next 32-bit word */
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
d129bceb 408
a406f5a3
PO
/*
 * Write one block from the request's scatterlist to the PIO buffer
 * register.  Bytes are packed little-endian into 'scratch' and flushed to
 * the 32-bit BUFFER register every 4 bytes, or when the final (possibly
 * partial) word of the block is assembled.  Runs with local interrupts
 * disabled because the miter mapping is atomic.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* flush on a full word, or at the end of the block */
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
453
/*
 * Pump as many blocks as the controller currently has buffer space/data
 * for, polling the DATA/SPACE AVAILABLE bits in PRESENT_STATE between
 * blocks.  host->blocks counts blocks remaining in the transfer.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
491
48857d9b 492static int sdhci_pre_dma_transfer(struct sdhci_host *host,
c0999b72 493 struct mmc_data *data, int cookie)
48857d9b
RK
494{
495 int sg_count;
496
94538e51
RK
497 /*
498 * If the data buffers are already mapped, return the previous
499 * dma_map_sg() result.
500 */
501 if (data->host_cookie == COOKIE_PRE_MAPPED)
48857d9b 502 return data->sg_count;
48857d9b
RK
503
504 sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
505 data->flags & MMC_DATA_WRITE ?
506 DMA_TO_DEVICE : DMA_FROM_DEVICE);
507
508 if (sg_count == 0)
509 return -ENOSPC;
510
511 data->sg_count = sg_count;
c0999b72 512 data->host_cookie = cookie;
48857d9b
RK
513
514 return sg_count;
515}
516
2134a922
PO
517static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
518{
519 local_irq_save(*flags);
482fce99 520 return kmap_atomic(sg_page(sg)) + sg->offset;
2134a922
PO
521}
522
523static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
524{
482fce99 525 kunmap_atomic(buffer);
2134a922
PO
526 local_irq_restore(*flags);
527}
528
e57a5f61
AH
529static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
530 dma_addr_t addr, int len, unsigned cmd)
118cd17d 531{
e57a5f61 532 struct sdhci_adma2_64_desc *dma_desc = desc;
118cd17d 533
e57a5f61 534 /* 32-bit and 64-bit descriptors have these members in same position */
0545230f
AH
535 dma_desc->cmd = cpu_to_le16(cmd);
536 dma_desc->len = cpu_to_le16(len);
e57a5f61
AH
537 dma_desc->addr_lo = cpu_to_le32((u32)addr);
538
539 if (host->flags & SDHCI_USE_64_BIT_DMA)
540 dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
118cd17d
BD
541}
542
b5ffa674
AH
543static void sdhci_adma_mark_end(void *desc)
544{
e57a5f61 545 struct sdhci_adma2_64_desc *dma_desc = desc;
b5ffa674 546
e57a5f61 547 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
0545230f 548 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
b5ffa674
AH
549}
550
60c64762
RK
/*
 * Build the ADMA2 descriptor table for a mapped request.
 *
 * Each sg entry becomes one TRAN descriptor; entries whose DMA address is
 * not 32-bit aligned get a small leading TRAN descriptor that points at a
 * per-entry slot in the pre-allocated bounce (align) buffer.  For writes
 * the misaligned head bytes are copied into that slot here; for reads
 * they are copied back in sdhci_adma_table_post().  The table is closed
 * either by marking the last descriptor END or by appending a NOP+END
 * descriptor, depending on the NO_ENDATTR_IN_NOPDESC quirk.
 */
static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
634
/*
 * Post-transfer fixup for ADMA: for reads, copy any bytes that were
 * DMA'd into the bounce (align) buffer — because the sg entry's address
 * was not 32-bit aligned — back into the real buffers.  The sync call
 * makes the device's writes visible to the CPU before the memcpy.
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}
675
/*
 * Compute the 4-bit value for the TIMEOUT_CONTROL register for @cmd.
 *
 * The hardware times out after (2^(13 + count)) timeout-clock cycles, so
 * we find the smallest count whose timeout covers the request's needs
 * (data->timeout_ns/timeout_clks, or cmd->busy_timeout for busy-wait
 * commands).  0xE (the maximum usable value) is used as a safe fallback
 * when the target can't be represented or the host reports broken
 * timeout values.
 */
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 * =>
	 * (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("Too large timeout 0x%x requested for CMD%d!\n",
		    count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
742
6aa943ab
AV
743static void sdhci_set_transfer_irqs(struct sdhci_host *host)
744{
745 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
746 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
747
748 if (host->flags & SDHCI_REQ_USE_DMA)
b537f94c 749 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
6aa943ab 750 else
b537f94c
RK
751 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
752
753 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
754 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
6aa943ab
AV
755}
756
b45e668a 757static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
ee53ab5d
PO
758{
759 u8 count;
b45e668a
AD
760
761 if (host->ops->set_timeout) {
762 host->ops->set_timeout(host, cmd);
763 } else {
764 count = sdhci_calc_timeout(host, cmd);
765 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
766 }
767}
768
/*
 * Prepare the controller for the data phase of @cmd: program the timeout,
 * decide between SDMA/ADMA/PIO (falling back to PIO when sg lengths or
 * offsets violate the controller's alignment quirks), map buffers and
 * program the DMA address, select the matching interrupts, and finally
 * write the block size/count registers.  No-op beyond the timeout when
 * the command carries no data.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			/* SDMA can only address one contiguous buffer */
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
		    (host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		/* PIO path: start the sg iterator used by the PIO helpers */
		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
905
0293d501
AH
906static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
907 struct mmc_request *mrq)
908{
20845bef
AH
909 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
910 !mrq->cap_cmd_during_tfr;
0293d501
AH
911}
912
/*
 * Program the TRANSFER_MODE register for @cmd: block-count enable,
 * multi-block, read direction, DMA enable, and Auto-CMD12/Auto-CMD23
 * selection.  For non-data commands only the stale Auto-CMD bits are
 * cleared (or the whole register, per quirk), since some controllers
 * latch them across commands.
 */
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			/* Auto-CMD23 also needs the block count in ARGUMENT2 */
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
959
0cc563ce
AH
960static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
961{
962 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
963 ((mrq->cmd && mrq->cmd->error) ||
964 (mrq->sbc && mrq->sbc->error) ||
965 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
966 (mrq->data->stop && mrq->data->stop->error))) ||
967 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
968}
969
4e9f8fe5
AH
/*
 * Queue @mrq into the first free mrqs_done[] slot and kick the finish
 * tasklet, which completes it outside interrupt context.  Warns (and
 * refuses) if the request is already queued, and warns if no slot was
 * free (should be impossible: at most SDHCI_MAX_MRQS requests in flight).
 */
static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}
992
a6d3bdd5
AH
/*
 * Finish @mrq: clear any in-flight pointers (cmd/data_cmd/data) that
 * still refer to it, flag a deferred controller reset if the request
 * ended in error, and hand the request to the finish tasklet.
 */
static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	/* reset is performed later, from the finish tasklet */
	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}
1009
d129bceb
PO
/*
 * Complete the data phase of the current request: undo ADMA bounce
 * buffering, account transferred bytes (all-or-nothing, see comment
 * below), and either issue the stop command (open-ended or errored
 * multi-block transfers) or finish the whole request.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}
1069
d7422fb4
AH
1070static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
1071 unsigned long timeout)
1072{
1073 if (sdhci_data_line_cmd(mrq->cmd))
1074 mod_timer(&host->data_timer, timeout);
1075 else
1076 mod_timer(&host->timer, timeout);
1077}
1078
1079static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
1080{
1081 if (sdhci_data_line_cmd(mrq->cmd))
1082 del_timer(&host->data_timer);
1083 else
1084 del_timer(&host->timer);
1085}
1086
/*
 * Issue @cmd to the controller: wait for the CMD/DAT inhibit bits to
 * clear, arm the software timeout, program the data registers via
 * sdhci_prepare_data(), and finally write the command register, which
 * starts execution in hardware. Caller holds host->lock.
 */
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	/* Quirk: treat CMD12 as a busy command so its completion is tracked. */
	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Busy commands may take longer than 10 s; scale the soft timeout. */
	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	/* The hardware cannot deliver a 136-bit response with busy signal. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	/* Translate the MMC response flags into the SDHCI encoding. */
	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	/* This write starts the command in hardware. */
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
d129bceb
PO
1176
/*
 * Handle command-complete: read the response registers into cmd->resp[],
 * then either chain to the next command (after CMD23), wait for the busy
 * interrupt, or complete the request. Caller holds host->lock.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	int i;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	/* Early-notify the core so it may issue a command during transfer. */
	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 * feature so there might be some problems with older
	 * controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}
1236
52983382
KL
1237static u16 sdhci_get_preset_value(struct sdhci_host *host)
1238{
d975f121 1239 u16 preset = 0;
52983382 1240
d975f121
RK
1241 switch (host->timing) {
1242 case MMC_TIMING_UHS_SDR12:
52983382
KL
1243 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1244 break;
d975f121 1245 case MMC_TIMING_UHS_SDR25:
52983382
KL
1246 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1247 break;
d975f121 1248 case MMC_TIMING_UHS_SDR50:
52983382
KL
1249 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1250 break;
d975f121
RK
1251 case MMC_TIMING_UHS_SDR104:
1252 case MMC_TIMING_MMC_HS200:
52983382
KL
1253 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1254 break;
d975f121 1255 case MMC_TIMING_UHS_DDR50:
0dafa60e 1256 case MMC_TIMING_MMC_DDR52:
52983382
KL
1257 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1258 break;
e9fb05d5
AH
1259 case MMC_TIMING_MMC_HS400:
1260 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1261 break;
52983382
KL
1262 default:
1263 pr_warn("%s: Invalid UHS-I mode selected\n",
1264 mmc_hostname(host->mmc));
1265 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1266 break;
1267 }
1268 return preset;
1269}
1270
/*
 * Compute the SDHCI_CLOCK_CONTROL divider field (and clock mode bit) that
 * brings SDCLK as close to @clock as possible without exceeding it, and
 * report the resulting frequency through *@actual_clock.
 *
 * v3.00+ controllers may use preset values or programmable clock mode;
 * otherwise v3.00 uses even 10-bit divisors and v2.00 power-of-two ones.
 * Returns the value to write into SDHCI_CLOCK_CONTROL (enable bits clear).
 */
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			/* Hardware presets dictate the divisor directly. */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			/* Quirk: some controllers cannot take a zero divisor. */
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	/* The 10-bit divisor is split across two fields in the register. */
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1363
/*
 * Write @clk (a divider value from sdhci_calc_clk()) to the clock control
 * register, enable the internal clock, poll up to 20 ms for it to become
 * stable, then gate the clock through to the card.
 */
void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	/* Clock is stable; now drive it out to the card. */
	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1388
1389void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1390{
1391 u16 clk;
1392
1393 host->mmc->actual_clock = 0;
1394
1395 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1396
1397 if (clock == 0)
1398 return;
1399
1400 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1401 sdhci_enable_clk(host, clk);
1402}
1771059c 1403EXPORT_SYMBOL_GPL(sdhci_set_clock);
d129bceb 1404
1dceb041
AH
1405static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1406 unsigned short vdd)
146ad66e 1407{
3a48edc4 1408 struct mmc_host *mmc = host->mmc;
1dceb041 1409
1dceb041 1410 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1dceb041
AH
1411
1412 if (mode != MMC_POWER_OFF)
1413 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1414 else
1415 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1416}
1417
/*
 * Program card power purely through the controller's power control
 * register (no external regulator). @vdd is a bit number from the
 * MMC_VDD_* mask. The register write sequencing below is deliberate —
 * several quirky controllers depend on the exact order.
 */
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		/* Map the requested VDD range onto the SDHCI voltage select. */
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	/* Nothing to do if the requested power state is already set. */
	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and set turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
918f4cbd 1484
606d3131
AH
1485void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1486 unsigned short vdd)
1dceb041 1487{
606d3131
AH
1488 if (IS_ERR(host->mmc->supply.vmmc))
1489 sdhci_set_power_noreg(host, mode, vdd);
1dceb041 1490 else
606d3131 1491 sdhci_set_power_reg(host, mode, vdd);
146ad66e 1492}
606d3131 1493EXPORT_SYMBOL_GPL(sdhci_set_power);
146ad66e 1494
d129bceb
PO
1495/*****************************************************************************\
1496 * *
1497 * MMC callbacks *
1498 * *
1499\*****************************************************************************/
1500
/*
 * mmc_host_ops.request entry point: start @mrq on the controller.
 * Card presence is checked outside the lock (get_cd may sleep); the
 * command itself is issued under host->lock.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		/* No card (or dead controller): fail the request immediately. */
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		/* Send CMD23 first unless the controller auto-issues it. */
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1540
2317f56c
RK
1541void sdhci_set_bus_width(struct sdhci_host *host, int width)
1542{
1543 u8 ctrl;
1544
1545 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1546 if (width == MMC_BUS_WIDTH_8) {
1547 ctrl &= ~SDHCI_CTRL_4BITBUS;
1548 if (host->version >= SDHCI_SPEC_300)
1549 ctrl |= SDHCI_CTRL_8BITBUS;
1550 } else {
1551 if (host->version >= SDHCI_SPEC_300)
1552 ctrl &= ~SDHCI_CTRL_8BITBUS;
1553 if (width == MMC_BUS_WIDTH_4)
1554 ctrl |= SDHCI_CTRL_4BITBUS;
1555 else
1556 ctrl &= ~SDHCI_CTRL_4BITBUS;
1557 }
1558 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1559}
1560EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1561
96d7b78c
RK
1562void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1563{
1564 u16 ctrl_2;
1565
1566 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1567 /* Select Bus Speed Mode for host */
1568 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1569 if ((timing == MMC_TIMING_MMC_HS200) ||
1570 (timing == MMC_TIMING_UHS_SDR104))
1571 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1572 else if (timing == MMC_TIMING_UHS_SDR12)
1573 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1574 else if (timing == MMC_TIMING_UHS_SDR25)
1575 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1576 else if (timing == MMC_TIMING_UHS_SDR50)
1577 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1578 else if ((timing == MMC_TIMING_UHS_DDR50) ||
1579 (timing == MMC_TIMING_MMC_DDR52))
1580 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
e9fb05d5
AH
1581 else if (timing == MMC_TIMING_MMC_HS400)
1582 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
96d7b78c
RK
1583 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1584}
1585EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1586
/*
 * mmc_host_ops.set_ios entry point: apply clock, power, bus width, timing
 * and driver strength settings. The register write ordering below (gating
 * SDCLK around high-speed/UHS changes, voltage before power, etc.) follows
 * the SDHC spec and several controller-specific quirks — do not reorder.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		/* Dead controller: at most turn the regulator off. */
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		/* Data timeout ticks with SDCLK here, so rescale its limits. */
		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/* Any timing faster than legacy wants the high-speed enable bit. */
	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS ||
	     ios->timing == MMC_TIMING_MMC_HS400 ||
	     ios->timing == MMC_TIMING_MMC_HS200 ||
	     ios->timing == MMC_TIMING_MMC_DDR52 ||
	     ios->timing == MMC_TIMING_UHS_SDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR104 ||
	     ios->timing == MMC_TIMING_UHS_DDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR25)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			/* Presets also dictate driver strength: feed it back. */
			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}
1742
ded97e0b 1743static int sdhci_get_cd(struct mmc_host *mmc)
66fd8ad5
AH
1744{
1745 struct sdhci_host *host = mmc_priv(mmc);
ded97e0b 1746 int gpio_cd = mmc_gpio_get_cd(mmc);
94144a46
KL
1747
1748 if (host->flags & SDHCI_DEVICE_DEAD)
1749 return 0;
1750
88af5655 1751 /* If nonremovable, assume that the card is always present. */
860951c5 1752 if (!mmc_card_is_removable(host->mmc))
94144a46
KL
1753 return 1;
1754
88af5655
II
1755 /*
1756 * Try slot gpio detect, if defined it take precedence
1757 * over build in controller functionality
1758 */
287980e4 1759 if (gpio_cd >= 0)
94144a46
KL
1760 return !!gpio_cd;
1761
88af5655
II
1762 /* If polling, assume that the card is always present. */
1763 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1764 return 1;
1765
94144a46
KL
1766 /* Host native card detect */
1767 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1768}
1769
66fd8ad5 1770static int sdhci_check_ro(struct sdhci_host *host)
d129bceb 1771{
d129bceb 1772 unsigned long flags;
2dfb579c 1773 int is_readonly;
d129bceb 1774
d129bceb
PO
1775 spin_lock_irqsave(&host->lock, flags);
1776
1e72859e 1777 if (host->flags & SDHCI_DEVICE_DEAD)
2dfb579c
WS
1778 is_readonly = 0;
1779 else if (host->ops->get_ro)
1780 is_readonly = host->ops->get_ro(host);
1e72859e 1781 else
2dfb579c
WS
1782 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1783 & SDHCI_WRITE_PROTECT);
d129bceb
PO
1784
1785 spin_unlock_irqrestore(&host->lock, flags);
1786
2dfb579c
WS
1787 /* This quirk needs to be replaced by a callback-function later */
1788 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1789 !is_readonly : is_readonly;
d129bceb
PO
1790}
1791
82b0e23a
TI
1792#define SAMPLE_COUNT 5
1793
ded97e0b 1794static int sdhci_get_ro(struct mmc_host *mmc)
82b0e23a 1795{
ded97e0b 1796 struct sdhci_host *host = mmc_priv(mmc);
82b0e23a
TI
1797 int i, ro_count;
1798
82b0e23a 1799 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
66fd8ad5 1800 return sdhci_check_ro(host);
82b0e23a
TI
1801
1802 ro_count = 0;
1803 for (i = 0; i < SAMPLE_COUNT; i++) {
66fd8ad5 1804 if (sdhci_check_ro(host)) {
82b0e23a
TI
1805 if (++ro_count > SAMPLE_COUNT / 2)
1806 return 1;
1807 }
1808 msleep(30);
1809 }
1810 return 0;
1811}
1812
20758b66
AH
1813static void sdhci_hw_reset(struct mmc_host *mmc)
1814{
1815 struct sdhci_host *host = mmc_priv(mmc);
1816
1817 if (host->ops && host->ops->hw_reset)
1818 host->ops->hw_reset(host);
1819}
1820
66fd8ad5
AH
1821static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1822{
be138554 1823 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
ef104333 1824 if (enable)
b537f94c 1825 host->ier |= SDHCI_INT_CARD_INT;
ef104333 1826 else
b537f94c
RK
1827 host->ier &= ~SDHCI_INT_CARD_INT;
1828
1829 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1830 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
ef104333
RK
1831 mmiowb();
1832 }
66fd8ad5
AH
1833}
1834
/*
 * mmc_host_ops.enable_sdio_irq: toggle SDIO card interrupt delivery.
 * A runtime-PM reference is held for as long as the IRQ is enabled so
 * the controller cannot suspend while interrupts are expected; note the
 * get happens before taking the lock and the put after releasing it.
 */
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}
1855
/*
 * mmc_host_ops.start_signal_voltage_switch: switch the I/O signalling
 * level (3.3 V / 1.8 V / 1.2 V). For 3.3 V and 1.8 V the 1.8 V enable
 * bit in Host Control 2 is written and then read back after the
 * regulator settles to confirm the controller accepted the switch.
 *
 * Returns 0 on success, -EINVAL if the level is unsupported, -EIO on a
 * regulator failure and -EAGAIN if the controller did not stabilise.
 */
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not became stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controller need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not became stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		/* 1.2 V is regulator-only; the controller has no enable bit. */
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
1949
20b92a30
KL
1950static int sdhci_card_busy(struct mmc_host *mmc)
1951{
1952 struct sdhci_host *host = mmc_priv(mmc);
1953 u32 present_state;
1954
e613cc47 1955 /* Check whether DAT[0] is 0 */
20b92a30 1956 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
20b92a30 1957
e613cc47 1958 return !(present_state & SDHCI_DATA_0_LVL_MASK);
20b92a30
KL
1959}
1960
/*
 * mmc_host_ops.prepare_hs400_tuning: note that the upcoming tuning
 * sequence targets HS400 mode. The flag is consumed by the tuning path.
 */
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
1972
/*
 * Put the controller into tuning mode: set Execute Tuning in Host
 * Control 2 and restrict interrupts to Buffer Read Ready only for the
 * duration of the tuning sequence.
 */
static void sdhci_start_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}
1996
1997static void sdhci_end_tuning(struct sdhci_host *host)
1998{
1999 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2000 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2001}
2002
2003static void sdhci_reset_tuning(struct sdhci_host *host)
2004{
2005 u16 ctrl;
2006
2007 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2008 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2009 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2010 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2011}
2012
/*
 * Abort a failed tuning sequence: fall back to the fixed sampling clock,
 * reset the CMD and DATA circuits, restore normal interrupts, then let the
 * core send the abort command for this tuning opcode.  Order matters: the
 * controller must be out of tuning mode before the resets are issued.
 */
static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
	sdhci_reset_tuning(host);

	sdhci_do_reset(host, SDHCI_RESET_CMD);
	sdhci_do_reset(host, SDHCI_RESET_DATA);

	sdhci_end_tuning(host);

	mmc_abort_tuning(host->mmc, opcode);
}
2024
/*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
 * tuning command does not have a data payload (or rather the hardware does it
 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
 * interrupt setup is different to other commands and there is no timeout
 * interrupt so special handling is needed.
 */
static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
{
	struct mmc_host *mmc = host->mmc;
	struct mmc_command cmd = {};
	struct mmc_request mrq = {};
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.mrq = &mrq;

	mrq.cmd = &cmd;
	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * block to the Host Controller. So we set the block size
	 * to 64 here.
	 */
	/* HS200 tuning in 8-bit mode uses a 128-byte tuning block instead */
	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
	else
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);

	/*
	 * The tuning block is sent by the card to the host controller.
	 * So we set the TRNS_READ bit in the Transfer Mode register.
	 * This also takes care of setting DMA Enable and Multi Block
	 * Select in the same register to 0.
	 */
	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	sdhci_send_command(host, &cmd);

	/*
	 * The hardware completes this command autonomously: drop our
	 * reference and the request timer; completion is signalled by the
	 * Buffer Read Ready interrupt setting host->tuning_done.
	 */
	host->cmd = NULL;

	sdhci_del_timer(host, &mrq);

	host->tuning_done = 0;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* Wait for Buffer Read Ready interrupt */
	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
			   msecs_to_jiffies(50));

}
2081
/*
 * Run the tuning loop: send the tuning command until the controller clears
 * Execute Tuning (success if it also latched a tuned clock), aborting on
 * interrupt timeout and falling back to the fixed clock after MAX_TUNING_LOOP
 * iterations.
 */
static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int i;

	/*
	 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times.
	 */
	for (i = 0; i < MAX_TUNING_LOOP; i++) {
		u16 ctrl;

		sdhci_send_tuning(host, opcode);

		/* No Buffer Read Ready interrupt arrived within the timeout */
		if (!host->tuning_done) {
			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
				mmc_hostname(host->mmc));
			sdhci_abort_tuning(host, opcode);
			return;
		}

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
			if (ctrl & SDHCI_CTRL_TUNED_CLK)
				return; /* Success! */
			break;
		}

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	}

	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
		mmc_hostname(host->mmc));
	sdhci_reset_tuning(host);
}
2118
/*
 * mmc_host_ops .execute_tuning entry point.
 * Decides, based on the current bus timing, whether tuning is required,
 * delegates to a platform hook when one exists, and otherwise drives the
 * standard SDHCI tuning sequence.  Always clears SDHCI_HS400_TUNING on exit.
 */
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err = 0;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	/* Set by sdhci_prepare_hs400_tuning() for the HS400 sequence */
	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out;
	}

	/* A platform-specific tuning routine overrides the standard one */
	if (host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(host, opcode);
		goto out;
	}

	/* tuning_count == 0 disables periodic re-tuning */
	host->mmc->retune_period = tuning_count;

	sdhci_start_tuning(host);

	__sdhci_execute_tuning(host, opcode);

	sdhci_end_tuning(host);
out:
	host->flags &= ~SDHCI_HS400_TUNING;

	return err;
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
b513ea25 2184
/*
 * Enable or disable the controller's Preset Value mechanism (v3.00+),
 * keeping host->preset_enabled and the SDHCI_PV_ENABLED flag in sync with
 * the SDHCI_CTRL_PRESET_VAL_ENABLE bit in HOST_CONTROL2.
 */
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
2213
348487cb
HC
2214static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2215 int err)
2216{
2217 struct sdhci_host *host = mmc_priv(mmc);
2218 struct mmc_data *data = mrq->data;
2219
f48f039c 2220 if (data->host_cookie != COOKIE_UNMAPPED)
771a3dc2
RK
2221 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2222 data->flags & MMC_DATA_WRITE ?
2223 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2224
2225 data->host_cookie = COOKIE_UNMAPPED;
348487cb
HC
2226}
2227
d3c6aac3 2228static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
348487cb
HC
2229{
2230 struct sdhci_host *host = mmc_priv(mmc);
2231
d31911b9 2232 mrq->data->host_cookie = COOKIE_UNMAPPED;
348487cb
HC
2233
2234 if (host->flags & SDHCI_REQ_USE_DMA)
94538e51 2235 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
348487cb
HC
2236}
2237
5d0d11c5
AH
2238static inline bool sdhci_has_requests(struct sdhci_host *host)
2239{
2240 return host->cmd || host->data_cmd;
2241}
2242
2243static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2244{
2245 if (host->data_cmd) {
2246 host->data_cmd->error = err;
2247 sdhci_finish_mrq(host, host->data_cmd->mrq);
2248 }
2249
2250 if (host->cmd) {
2251 host->cmd->error = err;
2252 sdhci_finish_mrq(host, host->cmd->mrq);
2253 }
2254}
2255
/*
 * mmc_host_ops .card_event hook: invoked on card-detect changes.  If the
 * card disappeared while requests were outstanding, reset the controller
 * and error out those requests with -ENOMEDIUM.
 */
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	/* Query presence before taking the lock; get_cd may sleep */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
		pr_err("%s: Card removed during transfer!\n",
		       mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
		       mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		sdhci_error_out_mrqs(host, -ENOMEDIUM);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
2285
/* Standard mmc_host_ops implementation shared by all SDHCI hosts. */
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
2301
2302/*****************************************************************************\
2303 * *
2304 * Tasklets *
2305 * *
2306\*****************************************************************************/
2307
/*
 * Complete one finished request from host->mrqs_done[].
 *
 * Returns true when there is nothing (more) to do right now — either no
 * finished request was found, or completion must be deferred until the
 * cmd/data lines are free for a required reset.  Returns false after
 * completing a request, so the caller (sdhci_tasklet_finish) loops again.
 */
static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	/* Pick the first finished request, remembering its slot in 'i' */
	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq)
			break;
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	host->mrqs_done[i] = NULL;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* Hand the request back to the core outside the lock */
	mmc_request_done(host->mmc, mrq);

	return false;
}
2386
/* Completion tasklet: drain every finished request in mrqs_done[]. */
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	for (;;) {
		/* sdhci_request_done() returns true once nothing is left */
		if (sdhci_request_done(host))
			break;
	}
}
2394
/*
 * Per-request timer for commands that do NOT use the data lines.
 * (Data-line commands are covered by sdhci_timeout_data_timer.)
 * Fails the stuck command with -ETIMEDOUT and dumps registers.
 */
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
2416
/*
 * Per-request timer for data transfers and data-line commands.
 * Times out whichever of data / data_cmd / cmd (using the data lines)
 * is still pending, preferring the active data transfer.
 */
static void sdhci_timeout_data_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			/* Must be host->cmd (data-line command) per the guard */
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
2447
2448/*****************************************************************************\
2449 * *
2450 * Interrupt handling *
2451 * *
2452\*****************************************************************************/
2453
/*
 * Handle command-related interrupt bits (called from sdhci_irq with the
 * host lock held).  Sets cmd->error on the error bits, with special care
 * for a response CRC error on a command that starts a data phase.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error. We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		sdhci_finish_mrq(host, host->cmd->mrq);
		return;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
2501
#ifdef CONFIG_MMC_DEBUG
/*
 * Debug helper: dump registers and walk the ADMA descriptor table,
 * printing each descriptor until the one marked ADMA2_END.
 * Only built with CONFIG_MMC_DEBUG; otherwise an empty stub.
 */
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		/* 64-bit descriptors carry an extra high address word */
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif
2533
d129bceb
PO
/*
 * Handle data-related interrupt bits (called from sdhci_irq with the host
 * lock held).  Covers tuning completion, busy-end signalling for R1b
 * commands, data error classification, PIO transfer, and SDMA boundary
 * restarts.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		struct mmc_command *data_cmd = host->data_cmd;

		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->data_cmd = NULL;
				data_cmd->error = -ETIMEDOUT;
				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				host->data_cmd = NULL;
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->cmd == data_cmd)
					return;

				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
		}

		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	/* Classify data errors; CRC errors during CMD14 (bus test) are benign */
	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;
			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
			    dmastart, host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd == host->data_cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
2655
/*
 * Hard IRQ handler.  Reads and clears INT_STATUS in a bounded loop
 * (max_loops guards against interrupt storms), dispatches command/data
 * bits to their handlers, and defers card-detect and SDIO card interrupts
 * to the threaded handler via IRQ_WAKE_THREAD.
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	/* While runtime suspended only a shared-IRQ SDIO case is ours */
	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	/* 0xffffffff means the device is gone (e.g. card removed from slot) */
	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		DBG("IRQ status 0x%08x\n", intmask);

		/* Platform hook may consume or rewrite the status bits */
		if (host->ops->irq) {
			intmask = host->ops->irq(host, intmask);
			if (!intmask)
				goto cont;
		}

		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is a observation on i.mx esdhc. INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system. And the REMOVE gets the
			 * same situation.
			 *
			 * More testing are needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
			       mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_RETUNE)
			mmc_retune_needed(host->mmc);

		if ((intmask & SDHCI_INT_CARD_INT) &&
		    (host->ier & SDHCI_INT_CARD_INT)) {
			/* Mask SDIO card IRQ until the thread re-enables it */
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		/* Whatever is left was not expected; ack and report it */
		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}
cont:
		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
2766
781e989c
RK
/*
 * Threaded IRQ handler: processes the work deferred by sdhci_irq()
 * via host->thread_isr — card-detect notifications and SDIO card
 * interrupts (re-enabling the SDIO IRQ afterwards if still wanted).
 */
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	/* Atomically take and clear the deferred interrupt bits */
	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}
2796
d129bceb
PO
2797/*****************************************************************************\
2798 * *
2799 * Suspend/resume *
2800 * *
2801\*****************************************************************************/
2802
2803#ifdef CONFIG_PM
84d62605
LD
/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
		| SDHCI_WAKE_ON_INT;
	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
		      SDHCI_INT_CARD_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
	}
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2831
0b10f478 2832static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
ad080d79
KL
2833{
2834 u8 val;
2835 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2836 | SDHCI_WAKE_ON_INT;
2837
2838 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2839 val &= ~mask;
2840 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2841}
d129bceb 2842
/*
 * System suspend: stop card detection and retuning, then either quiesce
 * all interrupts and release the IRQ (no wakeup) or arm the controller's
 * wakeup events and mark the IRQ as a wake source.
 */
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);
d129bceb 2862
b8c86fc5
PO
/*
 * System resume: re-enable DMA, re-initialise the controller (preserving
 * card power where MMC_PM_KEEP_POWER applies), and undo whichever IRQ
 * arrangement sdhci_suspend_host() made (re-request vs. disable wakeup).
 */
int sdhci_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		/* Force full re-program of power and clock via set_ios */
		mmc->ops->set_ios(mmc, &mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		/* IRQ was freed on suspend; request it again */
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);
66fd8ad5 2902
66fd8ad5
AH
/*
 * Runtime suspend: stop the retune timer, mask all interrupts except the
 * SDIO card interrupt, wait out any in-flight hard IRQ, then mark the
 * host runtime-suspended (checked by sdhci_irq).
 */
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	/* Make sure a concurrent sdhci_irq() has fully finished */
	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2924
/*
 * Runtime resume: re-enable DMA and re-initialise the controller, and —
 * unless power mode is still undefined — force clock/power/voltage
 * re-programming, restore preset values and HS400 enhanced strobe, then
 * clear the runtime-suspended flag and re-enable SDIO IRQ/card detection.
 */
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;
	/* Snapshot flags before sdhci_init() can change them */
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
		/* Force clock and power re-program */
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
		mmc->ops->set_ios(mmc, &mmc->ios);

		if ((host_flags & SDHCI_PV_ENABLED) &&
		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
			spin_lock_irqsave(&host->lock, flags);
			sdhci_enable_preset_value(host, true);
			spin_unlock_irqrestore(&host->lock, flags);
		}

		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
		    mmc->ops->hs400_enhanced_strobe)
			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2973
162d6f98 2974#endif /* CONFIG_PM */
66fd8ad5 2975
f12e39db
AH
2976/*****************************************************************************\
2977 * *
2978 * Command Queue Engine (CQE) helpers *
2979 * *
2980\*****************************************************************************/
2981
/*
 * Switch the controller into CQE (Command Queue Engine) mode: select the
 * ADMA transfer mode matching the host's DMA capability, program a 512-byte
 * block size, set the maximum data timeout, and enable only the CQE
 * interrupt set.
 */
void sdhci_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	/* CQE requires ADMA; pick 64- or 32-bit descriptors to match. */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		ctrl |= SDHCI_CTRL_ADMA64;
	else
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, 512),
		     SDHCI_BLOCK_SIZE);

	/* Set maximum timeout */
	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);

	/* Replace the normal interrupt mask with the CQE one. */
	host->ier = host->cqe_ier;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	host->cqe_on = true;

	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	/* Order the MMIO writes before releasing the lock. */
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3019
/*
 * Leave CQE mode and restore the normal interrupt mask.  When called for
 * error recovery, also reset the CMD and DATA lines.
 */
void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (recovery) {
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	/* Order the MMIO writes before releasing the lock. */
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3044
/*
 * CQE interrupt helper, called from the main interrupt handler.
 *
 * Translates the raw interrupt status into command/data error codes for the
 * CQE layer and acknowledges the interrupts that belong to the CQE mask.
 *
 * @intmask:    raw SDHCI_INT_STATUS value
 * @cmd_error:  out: 0, -EILSEQ (index/end-bit/CRC) or -ETIMEDOUT
 * @data_error: out: 0, -EILSEQ, -ETIMEDOUT or -EIO (ADMA error)
 *
 * Returns false if CQE is not active (caller handles the IRQ normally),
 * true if the interrupt was consumed here.
 */
bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
		   int *data_error)
{
	u32 mask;

	if (!host->cqe_on)
		return false;

	/* Map command-line errors, CRC-class errors first. */
	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
		*cmd_error = -EILSEQ;
	else if (intmask & SDHCI_INT_TIMEOUT)
		*cmd_error = -ETIMEDOUT;
	else
		*cmd_error = 0;

	/* Map data-line errors with the same priority scheme. */
	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
		*data_error = -EILSEQ;
	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
		*data_error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_ADMA_ERROR)
		*data_error = -EIO;
	else
		*data_error = 0;

	/* Clear selected interrupts. */
	mask = intmask & host->cqe_ier;
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

	if (intmask & SDHCI_INT_BUS_POWER)
		pr_err("%s: Card is consuming too much power!\n",
		       mmc_hostname(host->mmc));

	/* Anything left over is unexpected: ack it and dump state. */
	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
	if (intmask) {
		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);
	}

	return true;
}
EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3088
d129bceb
PO
3089/*****************************************************************************\
3090 * *
b8c86fc5 3091 * Device allocation/registration *
d129bceb
PO
3092 * *
3093\*****************************************************************************/
3094
b8c86fc5
PO
/*
 * Allocate an mmc_host with an sdhci_host (plus @priv_size bytes of
 * driver-private data) embedded as its private area, and initialize the
 * SDHCI defaults: the ops table, 3.3V signalling, and the CQE interrupt
 * masks.
 *
 * Returns the new host, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	/*
	 * Each host gets its own copy of the ops table so a driver can
	 * override individual callbacks without affecting other hosts.
	 */
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	host->cqe_ier     = SDHCI_CQE_INT_MASK;
	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
d129bceb 3121
7b91369b
AC
3122static int sdhci_set_dma_mask(struct sdhci_host *host)
3123{
3124 struct mmc_host *mmc = host->mmc;
3125 struct device *dev = mmc_dev(mmc);
3126 int ret = -EINVAL;
3127
3128 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3129 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3130
3131 /* Try 64-bit mask if hardware is capable of it */
3132 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3133 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3134 if (ret) {
3135 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3136 mmc_hostname(mmc));
3137 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3138 }
3139 }
3140
3141 /* 32-bit mask as default & fallback */
3142 if (ret) {
3143 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3144 if (ret)
3145 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3146 mmc_hostname(mmc));
3147 }
3148
3149 return ret;
3150}
3151
6132a3bf
AH
/*
 * Read (once) the host version and capabilities registers into host->caps
 * and host->caps1, applying module-parameter quirk overrides, device-tree
 * "sdhci-caps-mask"/"sdhci-caps" overrides, and caller-supplied values.
 *
 * @ver:   if non-NULL, use *ver instead of reading SDHCI_HOST_VERSION
 * @caps:  if non-NULL, use *caps instead of reading SDHCI_CAPABILITIES
 *         (DT overrides are then NOT applied to it)
 * @caps1: likewise for SDHCI_CAPABILITIES_1
 */
void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;

	/* Only ever read the capabilities once per host. */
	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	/* Reset the controller before reading, so the caps are sane. */
	sdhci_do_reset(host, SDHCI_RESET_ALL);

	/* DT caps mask (clear bits) and caps (set bits), both optional. */
	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps-mask", &dt_caps_mask);
	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps", &dt_caps);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	/* With this quirk the driver supplies caps itself; don't read them. */
	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	/* CAPABILITIES_1 only exists from spec v3.00 onwards. */
	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3202
52f5336d 3203int sdhci_setup_host(struct sdhci_host *host)
b8c86fc5
PO
3204{
3205 struct mmc_host *mmc;
f2119df6
AN
3206 u32 max_current_caps;
3207 unsigned int ocr_avail;
f5fa92e5 3208 unsigned int override_timeout_clk;
59241757 3209 u32 max_clk;
b8c86fc5 3210 int ret;
d129bceb 3211
b8c86fc5
PO
3212 WARN_ON(host == NULL);
3213 if (host == NULL)
3214 return -EINVAL;
d129bceb 3215
b8c86fc5 3216 mmc = host->mmc;
d129bceb 3217
efba142b
JH
3218 /*
3219 * If there are external regulators, get them. Note this must be done
3220 * early before resetting the host and reading the capabilities so that
3221 * the host can take the appropriate action if regulators are not
3222 * available.
3223 */
3224 ret = mmc_regulator_get_supply(mmc);
3225 if (ret == -EPROBE_DEFER)
3226 return ret;
3227
6132a3bf 3228 sdhci_read_caps(host);
d129bceb 3229
f5fa92e5
AH
3230 override_timeout_clk = host->timeout_clk;
3231
85105c53 3232 if (host->version > SDHCI_SPEC_300) {
2e4456f0
MV
3233 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3234 mmc_hostname(mmc), host->version);
4a965505
PO
3235 }
3236
b8c86fc5 3237 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
a13abc7b 3238 host->flags |= SDHCI_USE_SDMA;
28da3589 3239 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
a13abc7b 3240 DBG("Controller doesn't have SDMA capability\n");
67435274 3241 else
a13abc7b 3242 host->flags |= SDHCI_USE_SDMA;
d129bceb 3243
b8c86fc5 3244 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
a13abc7b 3245 (host->flags & SDHCI_USE_SDMA)) {
cee687ce 3246 DBG("Disabling DMA as it is marked broken\n");
a13abc7b 3247 host->flags &= ~SDHCI_USE_SDMA;
7c168e3d
FT
3248 }
3249
f2119df6 3250 if ((host->version >= SDHCI_SPEC_200) &&
28da3589 3251 (host->caps & SDHCI_CAN_DO_ADMA2))
a13abc7b 3252 host->flags |= SDHCI_USE_ADMA;
2134a922
PO
3253
3254 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3255 (host->flags & SDHCI_USE_ADMA)) {
3256 DBG("Disabling ADMA as it is marked broken\n");
3257 host->flags &= ~SDHCI_USE_ADMA;
3258 }
3259
e57a5f61
AH
3260 /*
3261 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3262 * and *must* do 64-bit DMA. A driver has the opportunity to change
3263 * that during the first call to ->enable_dma(). Similarly
3264 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3265 * implement.
3266 */
28da3589 3267 if (host->caps & SDHCI_CAN_64BIT)
e57a5f61
AH
3268 host->flags |= SDHCI_USE_64_BIT_DMA;
3269
a13abc7b 3270 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
7b91369b
AC
3271 ret = sdhci_set_dma_mask(host);
3272
3273 if (!ret && host->ops->enable_dma)
3274 ret = host->ops->enable_dma(host);
3275
3276 if (ret) {
3277 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3278 mmc_hostname(mmc));
3279 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3280
3281 ret = 0;
d129bceb
PO
3282 }
3283 }
3284
e57a5f61
AH
3285 /* SDMA does not support 64-bit DMA */
3286 if (host->flags & SDHCI_USE_64_BIT_DMA)
3287 host->flags &= ~SDHCI_USE_SDMA;
3288
2134a922 3289 if (host->flags & SDHCI_USE_ADMA) {
e66e61cb
RK
3290 dma_addr_t dma;
3291 void *buf;
3292
2134a922 3293 /*
76fe379a
AH
3294 * The DMA descriptor table size is calculated as the maximum
3295 * number of segments times 2, to allow for an alignment
3296 * descriptor for each segment, plus 1 for a nop end descriptor,
3297 * all multipled by the descriptor size.
2134a922 3298 */
e57a5f61
AH
3299 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3300 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3301 SDHCI_ADMA2_64_DESC_SZ;
e57a5f61 3302 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
e57a5f61
AH
3303 } else {
3304 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3305 SDHCI_ADMA2_32_DESC_SZ;
e57a5f61 3306 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
e57a5f61 3307 }
e66e61cb 3308
04a5ae6f 3309 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
e66e61cb
RK
3310 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3311 host->adma_table_sz, &dma, GFP_KERNEL);
3312 if (!buf) {
6606110d 3313 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2134a922
PO
3314 mmc_hostname(mmc));
3315 host->flags &= ~SDHCI_USE_ADMA;
e66e61cb
RK
3316 } else if ((dma + host->align_buffer_sz) &
3317 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
6606110d
JP
3318 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3319 mmc_hostname(mmc));
d1e49f77 3320 host->flags &= ~SDHCI_USE_ADMA;
e66e61cb
RK
3321 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3322 host->adma_table_sz, buf, dma);
3323 } else {
3324 host->align_buffer = buf;
3325 host->align_addr = dma;
edd63fcc 3326
e66e61cb
RK
3327 host->adma_table = buf + host->align_buffer_sz;
3328 host->adma_addr = dma + host->align_buffer_sz;
3329 }
2134a922
PO
3330 }
3331
7659150c
PO
3332 /*
3333 * If we use DMA, then it's up to the caller to set the DMA
3334 * mask, but PIO does not need the hw shim so we set a new
3335 * mask here in that case.
3336 */
a13abc7b 3337 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
7659150c 3338 host->dma_mask = DMA_BIT_MASK(64);
4e743f1f 3339 mmc_dev(mmc)->dma_mask = &host->dma_mask;
7659150c 3340 }
d129bceb 3341
c4687d5f 3342 if (host->version >= SDHCI_SPEC_300)
28da3589 3343 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
c4687d5f
ZG
3344 >> SDHCI_CLOCK_BASE_SHIFT;
3345 else
28da3589 3346 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
c4687d5f
ZG
3347 >> SDHCI_CLOCK_BASE_SHIFT;
3348
4240ff0a 3349 host->max_clk *= 1000000;
f27f47ef
AV
3350 if (host->max_clk == 0 || host->quirks &
3351 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4240ff0a 3352 if (!host->ops->get_max_clock) {
2e4456f0
MV
3353 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3354 mmc_hostname(mmc));
eb5c20de
AH
3355 ret = -ENODEV;
3356 goto undma;
4240ff0a
BD
3357 }
3358 host->max_clk = host->ops->get_max_clock(host);
8ef1a143 3359 }
d129bceb 3360
c3ed3877
AN
3361 /*
3362 * In case of Host Controller v3.00, find out whether clock
3363 * multiplier is supported.
3364 */
28da3589 3365 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
c3ed3877
AN
3366 SDHCI_CLOCK_MUL_SHIFT;
3367
3368 /*
3369 * In case the value in Clock Multiplier is 0, then programmable
3370 * clock mode is not supported, otherwise the actual clock
3371 * multiplier is one more than the value of Clock Multiplier
3372 * in the Capabilities Register.
3373 */
3374 if (host->clk_mul)
3375 host->clk_mul += 1;
3376
d129bceb
PO
3377 /*
3378 * Set host parameters.
3379 */
59241757
DA
3380 max_clk = host->max_clk;
3381
ce5f036b 3382 if (host->ops->get_min_clock)
a9e58f25 3383 mmc->f_min = host->ops->get_min_clock(host);
c3ed3877
AN
3384 else if (host->version >= SDHCI_SPEC_300) {
3385 if (host->clk_mul) {
3386 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
59241757 3387 max_clk = host->max_clk * host->clk_mul;
c3ed3877
AN
3388 } else
3389 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3390 } else
0397526d 3391 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
15ec4461 3392
d310ae49 3393 if (!mmc->f_max || mmc->f_max > max_clk)
59241757
DA
3394 mmc->f_max = max_clk;
3395
28aab053 3396 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
28da3589 3397 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
28aab053 3398 SDHCI_TIMEOUT_CLK_SHIFT;
8cc35289
SL
3399
3400 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3401 host->timeout_clk *= 1000;
3402
28aab053 3403 if (host->timeout_clk == 0) {
8cc35289 3404 if (!host->ops->get_timeout_clock) {
28aab053
AD
3405 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3406 mmc_hostname(mmc));
eb5c20de
AH
3407 ret = -ENODEV;
3408 goto undma;
28aab053 3409 }
272308ca 3410
8cc35289
SL
3411 host->timeout_clk =
3412 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3413 1000);
3414 }
272308ca 3415
99513624
AH
3416 if (override_timeout_clk)
3417 host->timeout_clk = override_timeout_clk;
3418
28aab053 3419 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
a6ff5aeb 3420 host->ops->get_max_timeout_count(host) : 1 << 27;
28aab053
AD
3421 mmc->max_busy_timeout /= host->timeout_clk;
3422 }
58d1246d 3423
e89d456f 3424 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
781e989c 3425 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
e89d456f
AW
3426
3427 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3428 host->flags |= SDHCI_AUTO_CMD12;
5fe23c7f 3429
8edf6371 3430 /* Auto-CMD23 stuff only works in ADMA or PIO. */
4f3d3e9b 3431 if ((host->version >= SDHCI_SPEC_300) &&
8edf6371 3432 ((host->flags & SDHCI_USE_ADMA) ||
3bfa6f03
SB
3433 !(host->flags & SDHCI_USE_SDMA)) &&
3434 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
8edf6371 3435 host->flags |= SDHCI_AUTO_CMD23;
f421865d 3436 DBG("Auto-CMD23 available\n");
8edf6371 3437 } else {
f421865d 3438 DBG("Auto-CMD23 unavailable\n");
8edf6371
AW
3439 }
3440
15ec4461
PR
3441 /*
3442 * A controller may support 8-bit width, but the board itself
3443 * might not have the pins brought out. Boards that support
3444 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3445 * their platform code before calling sdhci_add_host(), and we
3446 * won't assume 8-bit width for hosts without that CAP.
3447 */
5fe23c7f 3448 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
15ec4461 3449 mmc->caps |= MMC_CAP_4_BIT_DATA;
d129bceb 3450
63ef5d8c
JH
3451 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3452 mmc->caps &= ~MMC_CAP_CMD23;
3453
28da3589 3454 if (host->caps & SDHCI_CAN_DO_HISPD)
a29e7e18 3455 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
cd9277c0 3456
176d1ed4 3457 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
860951c5 3458 mmc_card_is_removable(mmc) &&
287980e4 3459 mmc_gpio_get_cd(host->mmc) < 0)
68d1fb7e
AV
3460 mmc->caps |= MMC_CAP_NEEDS_POLL;
3461
6231f3de 3462 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3a48edc4
TK
3463 if (!IS_ERR(mmc->supply.vqmmc)) {
3464 ret = regulator_enable(mmc->supply.vqmmc);
3465 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3466 1950000))
28da3589
AH
3467 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3468 SDHCI_SUPPORT_SDR50 |
3469 SDHCI_SUPPORT_DDR50);
a3361aba
CB
3470 if (ret) {
3471 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3472 mmc_hostname(mmc), ret);
4bb74313 3473 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
a3361aba 3474 }
8363c374 3475 }
6231f3de 3476
28da3589
AH
3477 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3478 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3479 SDHCI_SUPPORT_DDR50);
3480 }
6a66180a 3481
4188bba0 3482 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
28da3589
AH
3483 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3484 SDHCI_SUPPORT_DDR50))
f2119df6
AN
3485 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3486
3487 /* SDR104 supports also implies SDR50 support */
28da3589 3488 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
f2119df6 3489 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
156e14b1
GC
3490 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3491 * field can be promoted to support HS200.
3492 */
549c0b18 3493 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
13868bf2 3494 mmc->caps2 |= MMC_CAP2_HS200;
28da3589 3495 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
f2119df6 3496 mmc->caps |= MMC_CAP_UHS_SDR50;
28da3589 3497 }
f2119df6 3498
e9fb05d5 3499 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
28da3589 3500 (host->caps1 & SDHCI_SUPPORT_HS400))
e9fb05d5
AH
3501 mmc->caps2 |= MMC_CAP2_HS400;
3502
549c0b18
AH
3503 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3504 (IS_ERR(mmc->supply.vqmmc) ||
3505 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3506 1300000)))
3507 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3508
28da3589
AH
3509 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3510 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
f2119df6
AN
3511 mmc->caps |= MMC_CAP_UHS_DDR50;
3512
069c9f14 3513 /* Does the host need tuning for SDR50? */
28da3589 3514 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
b513ea25
AN
3515 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3516
d6d50a15 3517 /* Driver Type(s) (A, C, D) supported by the host */
28da3589 3518 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
d6d50a15 3519 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
28da3589 3520 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
d6d50a15 3521 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
28da3589 3522 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
d6d50a15
AN
3523 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3524
cf2b5eea 3525 /* Initial value for re-tuning timer count */
28da3589
AH
3526 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3527 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
cf2b5eea
AN
3528
3529 /*
3530 * In case Re-tuning Timer is not disabled, the actual value of
3531 * re-tuning timer will be 2 ^ (n - 1).
3532 */
3533 if (host->tuning_count)
3534 host->tuning_count = 1 << (host->tuning_count - 1);
3535
3536 /* Re-tuning mode supported by the Host Controller */
28da3589 3537 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
cf2b5eea
AN
3538 SDHCI_RETUNING_MODE_SHIFT;
3539
8f230f45 3540 ocr_avail = 0;
bad37e1a 3541
f2119df6
AN
3542 /*
3543 * According to SD Host Controller spec v3.00, if the Host System
3544 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3545 * the value is meaningful only if Voltage Support in the Capabilities
3546 * register is set. The actual current value is 4 times the register
3547 * value.
3548 */
3549 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3a48edc4 3550 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
ae906037 3551 int curr = regulator_get_current_limit(mmc->supply.vmmc);
bad37e1a
PR
3552 if (curr > 0) {
3553
3554 /* convert to SDHCI_MAX_CURRENT format */
3555 curr = curr/1000; /* convert to mA */
3556 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3557
3558 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3559 max_current_caps =
3560 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3561 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3562 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
3563 }
3564 }
f2119df6 3565
28da3589 3566 if (host->caps & SDHCI_CAN_VDD_330) {
8f230f45 3567 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
f2119df6 3568
55c4665e 3569 mmc->max_current_330 = ((max_current_caps &
f2119df6
AN
3570 SDHCI_MAX_CURRENT_330_MASK) >>
3571 SDHCI_MAX_CURRENT_330_SHIFT) *
3572 SDHCI_MAX_CURRENT_MULTIPLIER;
f2119df6 3573 }
28da3589 3574 if (host->caps & SDHCI_CAN_VDD_300) {
8f230f45 3575 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
f2119df6 3576
55c4665e 3577 mmc->max_current_300 = ((max_current_caps &
f2119df6
AN
3578 SDHCI_MAX_CURRENT_300_MASK) >>
3579 SDHCI_MAX_CURRENT_300_SHIFT) *
3580 SDHCI_MAX_CURRENT_MULTIPLIER;
f2119df6 3581 }
28da3589 3582 if (host->caps & SDHCI_CAN_VDD_180) {
8f230f45
TI
3583 ocr_avail |= MMC_VDD_165_195;
3584
55c4665e 3585 mmc->max_current_180 = ((max_current_caps &
f2119df6
AN
3586 SDHCI_MAX_CURRENT_180_MASK) >>
3587 SDHCI_MAX_CURRENT_180_SHIFT) *
3588 SDHCI_MAX_CURRENT_MULTIPLIER;
f2119df6
AN
3589 }
3590
5fd26c7e
UH
3591 /* If OCR set by host, use it instead. */
3592 if (host->ocr_mask)
3593 ocr_avail = host->ocr_mask;
3594
3595 /* If OCR set by external regulators, give it highest prio. */
3a48edc4 3596 if (mmc->ocr_avail)
52221610 3597 ocr_avail = mmc->ocr_avail;
3a48edc4 3598
8f230f45
TI
3599 mmc->ocr_avail = ocr_avail;
3600 mmc->ocr_avail_sdio = ocr_avail;
3601 if (host->ocr_avail_sdio)
3602 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3603 mmc->ocr_avail_sd = ocr_avail;
3604 if (host->ocr_avail_sd)
3605 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3606 else /* normal SD controllers don't support 1.8V */
3607 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3608 mmc->ocr_avail_mmc = ocr_avail;
3609 if (host->ocr_avail_mmc)
3610 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
146ad66e
PO
3611
3612 if (mmc->ocr_avail == 0) {
2e4456f0
MV
3613 pr_err("%s: Hardware doesn't report any support voltages.\n",
3614 mmc_hostname(mmc));
eb5c20de
AH
3615 ret = -ENODEV;
3616 goto unreg;
146ad66e
PO
3617 }
3618
8cb851a4
AH
3619 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3620 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3621 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3622 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3623 host->flags |= SDHCI_SIGNALING_180;
3624
3625 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3626 host->flags |= SDHCI_SIGNALING_120;
3627
d129bceb
PO
3628 spin_lock_init(&host->lock);
3629
3630 /*
2134a922
PO
3631 * Maximum number of segments. Depends on if the hardware
3632 * can do scatter/gather or not.
d129bceb 3633 */
2134a922 3634 if (host->flags & SDHCI_USE_ADMA)
4fb213f8 3635 mmc->max_segs = SDHCI_MAX_SEGS;
a13abc7b 3636 else if (host->flags & SDHCI_USE_SDMA)
a36274e0 3637 mmc->max_segs = 1;
2134a922 3638 else /* PIO */
4fb213f8 3639 mmc->max_segs = SDHCI_MAX_SEGS;
d129bceb
PO
3640
3641 /*
ac00531d
AH
3642 * Maximum number of sectors in one transfer. Limited by SDMA boundary
3643 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
3644 * is less anyway.
d129bceb 3645 */
55db890a 3646 mmc->max_req_size = 524288;
d129bceb
PO
3647
3648 /*
3649 * Maximum segment size. Could be one segment with the maximum number
2134a922
PO
3650 * of bytes. When doing hardware scatter/gather, each entry cannot
3651 * be larger than 64 KiB though.
d129bceb 3652 */
30652aa3
OJ
3653 if (host->flags & SDHCI_USE_ADMA) {
3654 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3655 mmc->max_seg_size = 65535;
3656 else
3657 mmc->max_seg_size = 65536;
3658 } else {
2134a922 3659 mmc->max_seg_size = mmc->max_req_size;
30652aa3 3660 }
d129bceb 3661
fe4a3c7a
PO
3662 /*
3663 * Maximum block size. This varies from controller to controller and
3664 * is specified in the capabilities register.
3665 */
0633f654
AV
3666 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3667 mmc->max_blk_size = 2;
3668 } else {
28da3589 3669 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
0633f654
AV
3670 SDHCI_MAX_BLOCK_SHIFT;
3671 if (mmc->max_blk_size >= 3) {
6606110d
JP
3672 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3673 mmc_hostname(mmc));
0633f654
AV
3674 mmc->max_blk_size = 0;
3675 }
3676 }
3677
3678 mmc->max_blk_size = 512 << mmc->max_blk_size;
fe4a3c7a 3679
55db890a
PO
3680 /*
3681 * Maximum block count.
3682 */
1388eefd 3683 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
55db890a 3684
52f5336d
AH
3685 return 0;
3686
3687unreg:
3688 if (!IS_ERR(mmc->supply.vqmmc))
3689 regulator_disable(mmc->supply.vqmmc);
3690undma:
3691 if (host->align_buffer)
3692 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3693 host->adma_table_sz, host->align_buffer,
3694 host->align_addr);
3695 host->adma_table = NULL;
3696 host->align_buffer = NULL;
3697
3698 return ret;
3699}
3700EXPORT_SYMBOL_GPL(sdhci_setup_host);
3701
4180ffa8
AH
3702void sdhci_cleanup_host(struct sdhci_host *host)
3703{
3704 struct mmc_host *mmc = host->mmc;
3705
3706 if (!IS_ERR(mmc->supply.vqmmc))
3707 regulator_disable(mmc->supply.vqmmc);
3708
3709 if (host->align_buffer)
3710 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3711 host->adma_table_sz, host->align_buffer,
3712 host->align_addr);
3713 host->adma_table = NULL;
3714 host->align_buffer = NULL;
3715}
3716EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
3717
52f5336d
AH
3718int __sdhci_add_host(struct sdhci_host *host)
3719{
3720 struct mmc_host *mmc = host->mmc;
3721 int ret;
3722
d129bceb
PO
3723 /*
3724 * Init tasklets.
3725 */
d129bceb
PO
3726 tasklet_init(&host->finish_tasklet,
3727 sdhci_tasklet_finish, (unsigned long)host);
3728
e4cad1b5 3729 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
d7422fb4
AH
3730 setup_timer(&host->data_timer, sdhci_timeout_data_timer,
3731 (unsigned long)host);
d129bceb 3732
250fb7b4 3733 init_waitqueue_head(&host->buf_ready_int);
b513ea25 3734
2af502ca
SG
3735 sdhci_init(host, 0);
3736
781e989c
RK
3737 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3738 IRQF_SHARED, mmc_hostname(mmc), host);
0fc81ee3
MB
3739 if (ret) {
3740 pr_err("%s: Failed to request IRQ %d: %d\n",
3741 mmc_hostname(mmc), host->irq, ret);
8ef1a143 3742 goto untasklet;
0fc81ee3 3743 }
d129bceb 3744
d129bceb
PO
3745#ifdef CONFIG_MMC_DEBUG
3746 sdhci_dumpregs(host);
3747#endif
3748
061d17a6 3749 ret = sdhci_led_register(host);
0fc81ee3
MB
3750 if (ret) {
3751 pr_err("%s: Failed to register LED device: %d\n",
3752 mmc_hostname(mmc), ret);
eb5c20de 3753 goto unirq;
0fc81ee3 3754 }
2f730fec 3755
5f25a66f
PO
3756 mmiowb();
3757
eb5c20de
AH
3758 ret = mmc_add_host(mmc);
3759 if (ret)
3760 goto unled;
d129bceb 3761
a3c76eb9 3762 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
d1b26863 3763 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
e57a5f61
AH
3764 (host->flags & SDHCI_USE_ADMA) ?
3765 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
a13abc7b 3766 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
d129bceb 3767
7260cf5e
AV
3768 sdhci_enable_card_detection(host);
3769
d129bceb
PO
3770 return 0;
3771
eb5c20de 3772unled:
061d17a6 3773 sdhci_led_unregister(host);
eb5c20de 3774unirq:
03231f9b 3775 sdhci_do_reset(host, SDHCI_RESET_ALL);
b537f94c
RK
3776 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3777 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2f730fec 3778 free_irq(host->irq, host);
8ef1a143 3779untasklet:
d129bceb 3780 tasklet_kill(&host->finish_tasklet);
52f5336d 3781
d129bceb
PO
3782 return ret;
3783}
52f5336d
AH
3784EXPORT_SYMBOL_GPL(__sdhci_add_host);
3785
/*
 * Full host registration: run the capability/setup stage, then the
 * registration stage, cleaning up the setup stage if registration fails.
 *
 * Returns 0 on success or a negative errno.
 */
int sdhci_add_host(struct sdhci_host *host)
{
	int ret = sdhci_setup_host(host);

	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
d129bceb 3806
1e72859e 3807void sdhci_remove_host(struct sdhci_host *host, int dead)
b8c86fc5 3808{
3a48edc4 3809 struct mmc_host *mmc = host->mmc;
1e72859e
PO
3810 unsigned long flags;
3811
3812 if (dead) {
3813 spin_lock_irqsave(&host->lock, flags);
3814
3815 host->flags |= SDHCI_DEVICE_DEAD;
3816
5d0d11c5 3817 if (sdhci_has_requests(host)) {
a3c76eb9 3818 pr_err("%s: Controller removed during "
4e743f1f 3819 " transfer!\n", mmc_hostname(mmc));
5d0d11c5 3820 sdhci_error_out_mrqs(host, -ENOMEDIUM);
1e72859e
PO
3821 }
3822
3823 spin_unlock_irqrestore(&host->lock, flags);
3824 }
3825
7260cf5e
AV
3826 sdhci_disable_card_detection(host);
3827
4e743f1f 3828 mmc_remove_host(mmc);
d129bceb 3829
061d17a6 3830 sdhci_led_unregister(host);
2f730fec 3831
1e72859e 3832 if (!dead)
03231f9b 3833 sdhci_do_reset(host, SDHCI_RESET_ALL);
d129bceb 3834
b537f94c
RK
3835 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3836 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
d129bceb
PO
3837 free_irq(host->irq, host);
3838
3839 del_timer_sync(&host->timer);
d7422fb4 3840 del_timer_sync(&host->data_timer);
d129bceb 3841
d129bceb 3842 tasklet_kill(&host->finish_tasklet);
2134a922 3843
3a48edc4
TK
3844 if (!IS_ERR(mmc->supply.vqmmc))
3845 regulator_disable(mmc->supply.vqmmc);
6231f3de 3846
edd63fcc 3847 if (host->align_buffer)
e66e61cb
RK
3848 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3849 host->adma_table_sz, host->align_buffer,
3850 host->align_addr);
2134a922 3851
4efaa6fb 3852 host->adma_table = NULL;
2134a922 3853 host->align_buffer = NULL;
d129bceb
PO
3854}
3855
b8c86fc5 3856EXPORT_SYMBOL_GPL(sdhci_remove_host);
d129bceb 3857
b8c86fc5 3858void sdhci_free_host(struct sdhci_host *host)
d129bceb 3859{
b8c86fc5 3860 mmc_free_host(host->mmc);
d129bceb
PO
3861}
3862
b8c86fc5 3863EXPORT_SYMBOL_GPL(sdhci_free_host);
d129bceb
PO
3864
3865/*****************************************************************************\
3866 * *
3867 * Driver init/exit *
3868 * *
3869\*****************************************************************************/
3870
/* Module init: just print the driver banner; hosts register themselves. */
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}
3879
/* Module exit: nothing to do — per-host teardown happens in remove paths. */
static void __exit sdhci_drv_exit(void)
{
}
3883
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

/* Read-only module parameters that force quirk bits on all hosts. */
module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");