/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_do_get_cd(struct sdhci_host *host);

static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		 mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
		 sdhci_readl(host, SDHCI_DMA_ADDRESS),
		 sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		 sdhci_readw(host, SDHCI_BLOCK_SIZE),
		 sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		 sdhci_readl(host, SDHCI_ARGUMENT),
		 sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
		 sdhci_readl(host, SDHCI_PRESENT_STATE),
		 sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
		 sdhci_readb(host, SDHCI_POWER_CONTROL),
		 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
		 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
		 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		 sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		 sdhci_readl(host, SDHCI_INT_ENABLE),
		 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		 sdhci_readw(host, SDHCI_ACMD12_ERR),
		 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
		 sdhci_readl(host, SDHCI_CAPABILITIES),
		 sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
		 sdhci_readw(host, SDHCI_COMMAND),
		 sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		 sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_debug(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
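
/*
 * A usage sketch (illustrative only, mirroring the callers later in this
 * file): a full controller reset versus resetting only the CMD and DATA
 * state machines:
 *
 *	sdhci_do_reset(host, SDHCI_RESET_ALL);
 *	sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 */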

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!sdhci_do_get_cd(host))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many settings, among them
		 * the preset-value enable, so record that presets are off. */
		host->preset_enabled = false;
	}
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);

static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
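
/*
 * Illustrative note on the PIO path above (assuming SDHCI_BUFFER is the
 * controller's 32-bit data port): a 512-byte block is drained with 128
 * reads of SDHCI_BUFFER; each read fills 'scratch' with four bytes that
 * are consumed least-significant byte first, which is what 'chunk'
 * tracks. sdhci_write_block_pio() below packs bytes in the same order.
 */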

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			      data->flags & MMC_DATA_WRITE ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
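
/*
 * Cookie lifecycle, summarised from the code in this file: sdhci_pre_req()
 * maps a request ahead of time with COOKIE_PRE_MAPPED, sdhci_prepare_data()
 * maps on demand with COOKIE_MAPPED, and sdhci_post_req() unmaps and
 * resets the cookie to COOKIE_UNMAPPED.
 */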

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
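
/*
 * A sketch of the ADMA2 descriptor this helper fills in (field names from
 * struct sdhci_adma2_64_desc in sdhci.h): 'cmd' carries the attribute
 * bits, 'len' the 16-bit transfer length, and 'addr_lo'/'addr_hi' the
 * little-endian DMA address. ADMA2_TRAN_VALID marks a data-transfer entry
 * and ADMA2_NOP_END_VALID a terminating no-op.
 */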

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
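
/*
 * For a scatterlist with one misaligned entry, the resulting chain looks
 * roughly like this (an illustration, not normative):
 *
 *	[tran,valid: bounce buffer, 1-3 bytes]
 *	[tran,valid: remainder of the segment]
 *	...
 *	[nop,end,valid]  - or ADMA2_END set on the last tran descriptor
 *			   when SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC is set
 */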

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000 * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 * =>
	 * (1) / (2) > 2^6
	 */
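	/*
	 * Worked example with assumed numbers (illustrative only): with
	 * timeout_clk = 50000 kHz the starting point below is
	 * 2^13 * 1000 / 50000 = 163 us; a target of 100000 us needs ten
	 * doublings (163 * 2^10 = 166912 us >= 100000 us), so count = 10
	 * and the register programs a timeout of 2^(13 + 10) clocks.
	 */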
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
		    (host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
					    data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
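
/*
 * For example (illustrative, with SDHCI_DEFAULT_BOUNDARY_ARG == 7):
 * SDHCI_MAKE_BLKSZ(7, 512) encodes a 512-byte block size together with
 * the largest SDMA buffer boundary, 4 KB << 7 = 512 KB.
 */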

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
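
/*
 * As a concrete illustration (not part of the original source): a
 * multi-block DMA read issued under CMD23 leaves this function with
 * mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI |
 *	  SDHCI_TRNS_AUTO_CMD23 | SDHCI_TRNS_READ | SDHCI_TRNS_DMA.
 */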

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
		       mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
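			/*
			 * Sketch of the layout being undone here (assuming,
			 * per the SDHCI spec, that response bits [119:0]
			 * live at SDHCI_RESPONSE): each 32-bit resp[i] is
			 * rebuilt from one register shifted left by 8 OR'd
			 * with the top byte of the register below it, so
			 * the CRC-stripped 120 bits line up with the MMC
			 * core's expected response layout.
			 */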
			for (i = 0; i < 4; i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
			    (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
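
/*
 * Worked example with assumed numbers (illustrative only): on a v3.00
 * host with max_clk = 200 MHz and no clk_mul, a request for 50 MHz stops
 * the divisor search at div = 4 (200 MHz / 4 <= 50 MHz), so real_div
 * reports actual_clock = 50 MHz and div >> 1 = 2 is written to the
 * divider field, which the hardware interprets as divide-by-2N.
 */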

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	unsigned long timeout;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		 & SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	spin_unlock_irq(&host->lock);
	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	spin_lock_irq(&host->lock);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}
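	/*
	 * For example (illustrative; vdd is a bit index into the OCR mask):
	 * vdd = 21 makes 1 << vdd equal MMC_VDD_33_34, which selects
	 * SDHCI_POWER_330 above.
	 */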

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on the power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply the clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			      unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	if (host->ops->set_power)
		host->ops->set_power(host, mode, vdd);
	else if (!IS_ERR(mmc->supply.vmmc))
		sdhci_set_power_reg(host, mode, vdd);
	else
		sdhci_set_power(host, mode, vdd);
}

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	u8 ctrl;
	struct mmc_host *mmc = host->mmc;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
	    (ios->power_mode == MMC_POWER_UP) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	__sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable bit in the Host Control 2 register is set,
			 * we need to reset SD Clock Enable before changing
			 * High Speed Enable to avoid generating clock
			 * glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
		    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
		     (ios->timing == MMC_TIMING_UHS_SDR25) ||
		     (ios->timing == MMC_TIMING_UHS_SDR50) ||
		     (ios->timing == MMC_TIMING_UHS_SDR104) ||
		     (ios->timing == MMC_TIMING_UHS_DDR50) ||
		     (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_do_set_ios(host, ios);
}

static int sdhci_do_get_cd(struct sdhci_host *host)
{
	int gpio_cd = mmc_gpio_get_cd(host->mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
		return 1;

	/*
	 * Try slot GPIO detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (!IS_ERR_VALUE(gpio_cd))
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	return sdhci_do_get_cd(host);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_do_get_ro(struct sdhci_host *host)
{
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	return sdhci_do_get_ro(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
						struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

1776 pr_warn("%s: 3.3V regulator output did not became stable\n",
1777 mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

1807 pr_warn("%s: 1.8V regulator output did not became stable\n",
1808 mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}

static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->version < SDHCI_SPEC_300)
		return 0;

	return sdhci_do_start_signal_voltage_switch(host, ios);
}

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[3:0] is 0000 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly until Execute Tuning is cleared to 0, the
	 * number of loops reaches 40, or a timeout of 150 ms occurs.
	 */
1944 do {
1945 struct mmc_command cmd = {0};
1946 struct mmc_request mrq = {NULL};
1947
1948 cmd.opcode = opcode;
1949 cmd.arg = 0;
1950 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1951 cmd.retries = 0;
1952 cmd.data = NULL;
1953 cmd.error = 0;
1954
1955 if (tuning_loop_counter-- == 0)
1956 break;
1957
1958 mrq.cmd = &cmd;
1959 host->mrq = &mrq;
1960
1961 /*
1962 * In response to CMD19, the card sends 64 bytes of tuning
1963 * block to the Host Controller. So we set the block size
1964 * to 64 here.
1965 */
1966 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1967 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
1968 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
1969 SDHCI_BLOCK_SIZE);
1970 else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
1971 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
1972 SDHCI_BLOCK_SIZE);
1973 } else {
1974 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
1975 SDHCI_BLOCK_SIZE);
1976 }
1977
1978 /*
1979 * The tuning block is sent by the card to the host controller.
1980 * So we set the TRNS_READ bit in the Transfer Mode register.
1981 * This also takes care of setting DMA Enable and Multi Block
1982 * Select in the same register to 0.
1983 */
1984 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
1985
1986 sdhci_send_command(host, &cmd);
1987
1988 host->cmd = NULL;
1989 host->mrq = NULL;
1990
1991 spin_unlock_irqrestore(&host->lock, flags);
1992 /* Wait for Buffer Read Ready interrupt */
1993 wait_event_interruptible_timeout(host->buf_ready_int,
1994 (host->tuning_done == 1),
1995 msecs_to_jiffies(50));
1996 spin_lock_irqsave(&host->lock, flags);
1997
1998 if (!host->tuning_done) {
1999 pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
2000 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2001 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2002 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2003 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2004
2005 err = -EIO;
2006 goto out;
2007 }
2008
2009 host->tuning_done = 0;
2010
2011 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2012
2013 /* eMMC spec does not require a delay between tuning cycles */
2014 if (opcode == MMC_SEND_TUNING_BLOCK)
2015 mdelay(1);
2016 } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
2017
2018 /*
2019 * The Host Driver has exhausted the maximum number of loops allowed,
2020 * so use fixed sampling frequency.
2021 */
2022 if (tuning_loop_counter < 0) {
2023 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2024 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2025 }
2026 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
2027 pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
2028 err = -EIO;
2029 }
2030
2031 out:
2032 if (tuning_count) {
2033 /*
2034 * If tuning fails, host controllers that support re-tuning
2035 * can simply try again later, when the re-tuning timer
2036 * expires. For those controllers we therefore return 0.
2037 * Controllers that lack this capability have no second
2038 * chance, so we return the error for them.
2039 */
2040 err = 0;
2041 }
2042
2043 host->mmc->retune_period = err ? 0 : tuning_count;
2044
2045 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2046 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2047 out_unlock:
2048 spin_unlock_irqrestore(&host->lock, flags);
2049 return err;
2050 }
2051
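/*
 * A minimal sketch of how a glue driver can take over the tuning
 * sequence entirely: if it supplies ->platform_execute_tuning in its
 * sdhci_ops, sdhci_execute_tuning() above delegates to it with
 * host->lock dropped. The "foo" names are hypothetical and the ops
 * table is deliberately partial; a real driver also fills in its
 * other callbacks (set_clock, reset, ...).
 */
static int foo_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	/*
	 * E.g. step a vendor-specific sampling delay line until the
	 * tuning command (CMD19/CMD21) completes without CRC errors.
	 */
	return 0;
}

static const struct sdhci_ops foo_sdhci_ops = {
	.platform_execute_tuning = foo_execute_tuning,
};
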
2052 static int sdhci_select_drive_strength(struct mmc_card *card,
2053 unsigned int max_dtr, int host_drv,
2054 int card_drv, int *drv_type)
2055 {
2056 struct sdhci_host *host = mmc_priv(card->host);
2057
2058 if (!host->ops->select_drive_strength)
2059 return 0;
2060
2061 return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
2062 card_drv, drv_type);
2063 }
2064
2065 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2066 {
2067 /* Host Controller v3.00 defines preset value registers */
2068 if (host->version < SDHCI_SPEC_300)
2069 return;
2070
2071 /*
2072 * Only change the Preset Value enable bit when the requested
2073 * state differs from the current one; otherwise bail out.
2074 */
2075 if (host->preset_enabled != enable) {
2076 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2077
2078 if (enable)
2079 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2080 else
2081 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2082
2083 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2084
2085 if (enable)
2086 host->flags |= SDHCI_PV_ENABLED;
2087 else
2088 host->flags &= ~SDHCI_PV_ENABLED;
2089
2090 host->preset_enabled = enable;
2091 }
2092 }
2093
2094 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2095 int err)
2096 {
2097 struct sdhci_host *host = mmc_priv(mmc);
2098 struct mmc_data *data = mrq->data;
2099
2100 if (data->host_cookie != COOKIE_UNMAPPED)
2101 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2102 data->flags & MMC_DATA_WRITE ?
2103 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2104
2105 data->host_cookie = COOKIE_UNMAPPED;
2106 }
2107
2108 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
2109 bool is_first_req)
2110 {
2111 struct sdhci_host *host = mmc_priv(mmc);
2112
2113 mrq->data->host_cookie = COOKIE_UNMAPPED;
2114
2115 if (host->flags & SDHCI_REQ_USE_DMA)
2116 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2117 }
2118
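/*
 * A rough sketch (not driver code) of the order in which the MMC core
 * invokes the two hooks above around ->request() for an asynchronous
 * request; the host_cookie tracks the sg-list mapping state:
 *
 *	sdhci_pre_req(mmc, mrq, true);	cookie: UNMAPPED -> PRE_MAPPED
 *	sdhci_request(mmc, mrq);	reuses the pre-mapped sg list
 *	(transfer completes, mmc_request_done())
 *	sdhci_post_req(mmc, mrq, 0);	unmap, cookie back to UNMAPPED
 */
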
2119 static void sdhci_card_event(struct mmc_host *mmc)
2120 {
2121 struct sdhci_host *host = mmc_priv(mmc);
2122 unsigned long flags;
2123 int present;
2124
2125 /* First check if client has provided their own card event */
2126 if (host->ops->card_event)
2127 host->ops->card_event(host);
2128
2129 present = sdhci_do_get_cd(host);
2130
2131 spin_lock_irqsave(&host->lock, flags);
2132
2133 /* Check host->mrq first in case we are runtime suspended */
2134 if (host->mrq && !present) {
2135 pr_err("%s: Card removed during transfer!\n",
2136 mmc_hostname(host->mmc));
2137 pr_err("%s: Resetting controller.\n",
2138 mmc_hostname(host->mmc));
2139
2140 sdhci_do_reset(host, SDHCI_RESET_CMD);
2141 sdhci_do_reset(host, SDHCI_RESET_DATA);
2142
2143 host->mrq->cmd->error = -ENOMEDIUM;
2144 tasklet_schedule(&host->finish_tasklet);
2145 }
2146
2147 spin_unlock_irqrestore(&host->lock, flags);
2148 }
2149
2150 static const struct mmc_host_ops sdhci_ops = {
2151 .request = sdhci_request,
2152 .post_req = sdhci_post_req,
2153 .pre_req = sdhci_pre_req,
2154 .set_ios = sdhci_set_ios,
2155 .get_cd = sdhci_get_cd,
2156 .get_ro = sdhci_get_ro,
2157 .hw_reset = sdhci_hw_reset,
2158 .enable_sdio_irq = sdhci_enable_sdio_irq,
2159 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2160 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2161 .execute_tuning = sdhci_execute_tuning,
2162 .select_drive_strength = sdhci_select_drive_strength,
2163 .card_event = sdhci_card_event,
2164 .card_busy = sdhci_card_busy,
2165 };
2166
2167 /*****************************************************************************\
2168 * *
2169 * Tasklets *
2170 * *
2171 \*****************************************************************************/
2172
2173 static void sdhci_tasklet_finish(unsigned long param)
2174 {
2175 struct sdhci_host *host;
2176 unsigned long flags;
2177 struct mmc_request *mrq;
2178
2179 host = (struct sdhci_host *)param;
2180
2181 spin_lock_irqsave(&host->lock, flags);
2182
2183 /*
2184 * If this tasklet gets rescheduled while running, it will
2185 * be run again afterwards but without any active request.
2186 */
2187 if (!host->mrq) {
2188 spin_unlock_irqrestore(&host->lock, flags);
2189 return;
2190 }
2191
2192 del_timer(&host->timer);
2193
2194 mrq = host->mrq;
2195
2196 /*
2197 * Always unmap the data buffers if they were mapped by
2198 * sdhci_prepare_data() whenever we finish with a request.
2199 * This avoids leaking DMA mappings on error.
2200 */
2201 if (host->flags & SDHCI_REQ_USE_DMA) {
2202 struct mmc_data *data = mrq->data;
2203
2204 if (data && data->host_cookie == COOKIE_MAPPED) {
2205 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2206 (data->flags & MMC_DATA_READ) ?
2207 DMA_FROM_DEVICE : DMA_TO_DEVICE);
2208 data->host_cookie = COOKIE_UNMAPPED;
2209 }
2210 }
2211
2212 /*
2213 * The controller needs a reset of internal state machines
2214 * upon error conditions.
2215 */
2216 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
2217 ((mrq->cmd && mrq->cmd->error) ||
2218 (mrq->sbc && mrq->sbc->error) ||
2219 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
2220 (mrq->data->stop && mrq->data->stop->error))) ||
2221 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
2222
2223 /* Some controllers need this kick or reset won't work here */
2224 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2225 /* This is to force an update */
2226 host->ops->set_clock(host, host->clock);
2227
2228 /* Spec says we should do both at the same time, but Ricoh
2229 controllers do not like that. */
2230 sdhci_do_reset(host, SDHCI_RESET_CMD);
2231 sdhci_do_reset(host, SDHCI_RESET_DATA);
2232 }
2233
2234 host->mrq = NULL;
2235 host->cmd = NULL;
2236 host->data = NULL;
2237
2238 sdhci_led_deactivate(host);
2239
2240 mmiowb();
2241 spin_unlock_irqrestore(&host->lock, flags);
2242
2243 mmc_request_done(host->mmc, mrq);
2244 }
2245
2246 static void sdhci_timeout_timer(unsigned long data)
2247 {
2248 struct sdhci_host *host;
2249 unsigned long flags;
2250
2251 host = (struct sdhci_host *)data;
2252
2253 spin_lock_irqsave(&host->lock, flags);
2254
2255 if (host->mrq) {
2256 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2257 mmc_hostname(host->mmc));
2258 sdhci_dumpregs(host);
2259
2260 if (host->data) {
2261 host->data->error = -ETIMEDOUT;
2262 sdhci_finish_data(host);
2263 } else {
2264 if (host->cmd)
2265 host->cmd->error = -ETIMEDOUT;
2266 else
2267 host->mrq->cmd->error = -ETIMEDOUT;
2268
2269 tasklet_schedule(&host->finish_tasklet);
2270 }
2271 }
2272
2273 mmiowb();
2274 spin_unlock_irqrestore(&host->lock, flags);
2275 }
2276
2277 /*****************************************************************************\
2278 * *
2279 * Interrupt handling *
2280 * *
2281 \*****************************************************************************/
2282
2283 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
2284 {
2285 BUG_ON(intmask == 0);
2286
2287 if (!host->cmd) {
2288 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2289 mmc_hostname(host->mmc), (unsigned)intmask);
2290 sdhci_dumpregs(host);
2291 return;
2292 }
2293
2294 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2295 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2296 if (intmask & SDHCI_INT_TIMEOUT)
2297 host->cmd->error = -ETIMEDOUT;
2298 else
2299 host->cmd->error = -EILSEQ;
2300
2301 /*
2302 * If this command initiates a data phase and a response
2303 * CRC error is signalled, the card can start transferring
2304 * data - the card may have received the command without
2305 * error. We must not terminate the mmc_request early.
2306 *
2307 * If the card did not receive the command or returned an
2308 * error which prevented it sending data, the data phase
2309 * will time out.
2310 */
2311 if (host->cmd->data &&
2312 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2313 SDHCI_INT_CRC) {
2314 host->cmd = NULL;
2315 return;
2316 }
2317
2318 tasklet_schedule(&host->finish_tasklet);
2319 return;
2320 }
2321
2322 /*
2323 * The host can send an interrupt when the busy state has
2324 * ended, allowing us to wait without wasting CPU cycles.
2325 * Unfortunately this is overloaded on the "data complete"
2326 * interrupt, so we need to take some care when handling
2327 * it.
2328 *
2329 * Note: The 1.0 specification is a bit ambiguous about this
2330 * feature so there might be some problems with older
2331 * controllers.
2332 */
2333 if (host->cmd->flags & MMC_RSP_BUSY) {
2334 if (host->cmd->data)
2335 DBG("Cannot wait for busy signal when also doing a data transfer");
2336 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
2337 && !host->busy_handle) {
2338 /* Mark that command complete before busy is ended */
2339 host->busy_handle = 1;
2340 return;
2341 }
2342
2343 /* The controller does not support the end-of-busy IRQ,
2344 * fall through and take the SDHCI_INT_RESPONSE */
2345 } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
2346 host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
2347 *mask &= ~SDHCI_INT_DATA_END;
2348 }
2349
2350 if (intmask & SDHCI_INT_RESPONSE)
2351 sdhci_finish_command(host);
2352 }
2353
2354 #ifdef CONFIG_MMC_DEBUG
2355 static void sdhci_adma_show_error(struct sdhci_host *host)
2356 {
2357 const char *name = mmc_hostname(host->mmc);
2358 void *desc = host->adma_table;
2359
2360 sdhci_dumpregs(host);
2361
2362 while (true) {
2363 struct sdhci_adma2_64_desc *dma_desc = desc;
2364
2365 if (host->flags & SDHCI_USE_64_BIT_DMA)
2366 DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2367 name, desc, le32_to_cpu(dma_desc->addr_hi),
2368 le32_to_cpu(dma_desc->addr_lo),
2369 le16_to_cpu(dma_desc->len),
2370 le16_to_cpu(dma_desc->cmd));
2371 else
2372 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2373 name, desc, le32_to_cpu(dma_desc->addr_lo),
2374 le16_to_cpu(dma_desc->len),
2375 le16_to_cpu(dma_desc->cmd));
2376
2377 desc += host->desc_sz;
2378
2379 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2380 break;
2381 }
2382 }
2383 #else
2384 static void sdhci_adma_show_error(struct sdhci_host *host) { }
2385 #endif
2386
2387 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2388 {
2389 u32 command;
2390 BUG_ON(intmask == 0);
2391
2392 /* CMD19/CMD21 generate _only_ a Buffer Read Ready interrupt */
2393 if (intmask & SDHCI_INT_DATA_AVAIL) {
2394 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2395 if (command == MMC_SEND_TUNING_BLOCK ||
2396 command == MMC_SEND_TUNING_BLOCK_HS200) {
2397 host->tuning_done = 1;
2398 wake_up(&host->buf_ready_int);
2399 return;
2400 }
2401 }
2402
2403 if (!host->data) {
2404 /*
2405 * The "data complete" interrupt is also used to
2406 * indicate that a busy state has ended. See comment
2407 * above in sdhci_cmd_irq().
2408 */
2409 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
2410 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2411 host->cmd->error = -ETIMEDOUT;
2412 tasklet_schedule(&host->finish_tasklet);
2413 return;
2414 }
2415 if (intmask & SDHCI_INT_DATA_END) {
2416 /*
2417 * Some cards handle busy-end interrupt
2418 * before the command completed, so make
2419 * sure we do things in the proper order.
2420 */
2421 if (host->busy_handle)
2422 sdhci_finish_command(host);
2423 else
2424 host->busy_handle = 1;
2425 return;
2426 }
2427 }
2428
2429 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2430 mmc_hostname(host->mmc), (unsigned)intmask);
2431 sdhci_dumpregs(host);
2432
2433 return;
2434 }
2435
2436 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2437 host->data->error = -ETIMEDOUT;
2438 else if (intmask & SDHCI_INT_DATA_END_BIT)
2439 host->data->error = -EILSEQ;
2440 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2441 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2442 != MMC_BUS_TEST_R)
2443 host->data->error = -EILSEQ;
2444 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2445 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2446 sdhci_adma_show_error(host);
2447 host->data->error = -EIO;
2448 if (host->ops->adma_workaround)
2449 host->ops->adma_workaround(host, intmask);
2450 }
2451
2452 if (host->data->error)
2453 sdhci_finish_data(host);
2454 else {
2455 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2456 sdhci_transfer_pio(host);
2457
2458 /*
2459 * We currently don't do anything fancy with DMA
2460 * boundaries, but as we can't disable the feature
2461 * we need to at least restart the transfer.
2462 *
2463 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2464 * should return a valid address to continue from, but as
2465 * some controllers are faulty, don't trust them.
2466 */
2467 if (intmask & SDHCI_INT_DMA_END) {
2468 u32 dmastart, dmanow;
2469 dmastart = sg_dma_address(host->data->sg);
2470 dmanow = dmastart + host->data->bytes_xfered;
2471 /*
2472 * Force update to the next DMA block boundary.
2473 */
2474 dmanow = (dmanow &
2475 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2476 SDHCI_DEFAULT_BOUNDARY_SIZE;
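/*
 * Worked example with the default 512 KiB boundary: dmastart =
 * 0x10000000 and bytes_xfered = 0x90000 give dmanow = 0x10090000;
 * masking with ~(0x80000 - 1) and adding 0x80000 yields
 * 0x10100000, the start of the next 512 KiB block, which is
 * written back to SDHCI_DMA_ADDRESS below.
 */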
2477 host->data->bytes_xfered = dmanow - dmastart;
2478 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
2479 " next 0x%08x\n",
2480 mmc_hostname(host->mmc), dmastart,
2481 host->data->bytes_xfered, dmanow);
2482 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2483 }
2484
2485 if (intmask & SDHCI_INT_DATA_END) {
2486 if (host->cmd) {
2487 /*
2488 * Data managed to finish before the
2489 * command completed. Make sure we do
2490 * things in the proper order.
2491 */
2492 host->data_early = 1;
2493 } else {
2494 sdhci_finish_data(host);
2495 }
2496 }
2497 }
2498 }
2499
2500 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2501 {
2502 irqreturn_t result = IRQ_NONE;
2503 struct sdhci_host *host = dev_id;
2504 u32 intmask, mask, unexpected = 0;
2505 int max_loops = 16;
2506
2507 spin_lock(&host->lock);
2508
2509 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2510 spin_unlock(&host->lock);
2511 return IRQ_NONE;
2512 }
2513
2514 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2515 if (!intmask || intmask == 0xffffffff) {
2516 result = IRQ_NONE;
2517 goto out;
2518 }
2519
2520 do {
2521 /* Clear selected interrupts. */
2522 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2523 SDHCI_INT_BUS_POWER);
2524 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2525
2526 DBG("*** %s got interrupt: 0x%08x\n",
2527 mmc_hostname(host->mmc), intmask);
2528
2529 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2530 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2531 SDHCI_CARD_PRESENT;
2532
2533 /*
2534 * On i.MX eSDHC it has been observed that the INSERT
2535 * bit is immediately set again as soon as it gets
2536 * cleared while a card is inserted, so we have to mask
2537 * the irq to prevent an interrupt storm that would
2538 * freeze the system. The REMOVE bit behaves the
2539 * same way.
2540 *
2541 * More testing is needed here to ensure this works
2542 * on other platforms as well.
2543 */
2544 host->ier &= ~(SDHCI_INT_CARD_INSERT |
2545 SDHCI_INT_CARD_REMOVE);
2546 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2547 SDHCI_INT_CARD_INSERT;
2548 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2549 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2550
2551 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2552 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2553
2554 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2555 SDHCI_INT_CARD_REMOVE);
2556 result = IRQ_WAKE_THREAD;
2557 }
2558
2559 if (intmask & SDHCI_INT_CMD_MASK)
2560 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
2561 &intmask);
2562
2563 if (intmask & SDHCI_INT_DATA_MASK)
2564 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2565
2566 if (intmask & SDHCI_INT_BUS_POWER)
2567 pr_err("%s: Card is consuming too much power!\n",
2568 mmc_hostname(host->mmc));
2569
2570 if (intmask & SDHCI_INT_CARD_INT) {
2571 sdhci_enable_sdio_irq_nolock(host, false);
2572 host->thread_isr |= SDHCI_INT_CARD_INT;
2573 result = IRQ_WAKE_THREAD;
2574 }
2575
2576 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2577 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2578 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2579 SDHCI_INT_CARD_INT);
2580
2581 if (intmask) {
2582 unexpected |= intmask;
2583 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2584 }
2585
2586 if (result == IRQ_NONE)
2587 result = IRQ_HANDLED;
2588
2589 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2590 } while (intmask && --max_loops);
2591 out:
2592 spin_unlock(&host->lock);
2593
2594 if (unexpected) {
2595 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2596 mmc_hostname(host->mmc), unexpected);
2597 sdhci_dumpregs(host);
2598 }
2599
2600 return result;
2601 }
2602
2603 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2604 {
2605 struct sdhci_host *host = dev_id;
2606 unsigned long flags;
2607 u32 isr;
2608
2609 spin_lock_irqsave(&host->lock, flags);
2610 isr = host->thread_isr;
2611 host->thread_isr = 0;
2612 spin_unlock_irqrestore(&host->lock, flags);
2613
2614 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2615 sdhci_card_event(host->mmc);
2616 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
2617 }
2618
2619 if (isr & SDHCI_INT_CARD_INT) {
2620 sdio_run_irqs(host->mmc);
2621
2622 spin_lock_irqsave(&host->lock, flags);
2623 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2624 sdhci_enable_sdio_irq_nolock(host, true);
2625 spin_unlock_irqrestore(&host->lock, flags);
2626 }
2627
2628 return isr ? IRQ_HANDLED : IRQ_NONE;
2629 }
2630
2631 /*****************************************************************************\
2632 * *
2633 * Suspend/resume *
2634 * *
2635 \*****************************************************************************/
2636
2637 #ifdef CONFIG_PM
2638 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2639 {
2640 u8 val;
2641 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2642 | SDHCI_WAKE_ON_INT;
2643
2644 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2645 val |= mask;
2646 /* Avoid spurious wakeups */
2647 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2648 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2649 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2650 }
2651 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2652
2653 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2654 {
2655 u8 val;
2656 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2657 | SDHCI_WAKE_ON_INT;
2658
2659 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2660 val &= ~mask;
2661 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2662 }
2663
2664 int sdhci_suspend_host(struct sdhci_host *host)
2665 {
2666 sdhci_disable_card_detection(host);
2667
2668 mmc_retune_timer_stop(host->mmc);
2669 mmc_retune_needed(host->mmc);
2670
2671 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2672 host->ier = 0;
2673 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2674 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2675 free_irq(host->irq, host);
2676 } else {
2677 sdhci_enable_irq_wakeups(host);
2678 enable_irq_wake(host->irq);
2679 }
2680 return 0;
2681 }
2682
2683 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2684
2685 int sdhci_resume_host(struct sdhci_host *host)
2686 {
2687 int ret = 0;
2688
2689 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2690 if (host->ops->enable_dma)
2691 host->ops->enable_dma(host);
2692 }
2693
2694 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2695 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2696 /* Card keeps power but host controller does not */
2697 sdhci_init(host, 0);
2698 host->pwr = 0;
2699 host->clock = 0;
2700 sdhci_do_set_ios(host, &host->mmc->ios);
2701 } else {
2702 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2703 mmiowb();
2704 }
2705
2706 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2707 ret = request_threaded_irq(host->irq, sdhci_irq,
2708 sdhci_thread_irq, IRQF_SHARED,
2709 mmc_hostname(host->mmc), host);
2710 if (ret)
2711 return ret;
2712 } else {
2713 sdhci_disable_irq_wakeups(host);
2714 disable_irq_wake(host->irq);
2715 }
2716
2717 sdhci_enable_card_detection(host);
2718
2719 return ret;
2720 }
2721
2722 EXPORT_SYMBOL_GPL(sdhci_resume_host);
2723
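/*
 * A minimal sketch, with hypothetical "foo" names, of how a glue
 * driver typically wires the system sleep helpers above into its
 * dev_pm_ops, assuming probe() stored the sdhci_host with
 * dev_set_drvdata():
 */
#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	return sdhci_suspend_host(host);
}

static int foo_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	return sdhci_resume_host(host);
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
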
2724 int sdhci_runtime_suspend_host(struct sdhci_host *host)
2725 {
2726 unsigned long flags;
2727
2728 mmc_retune_timer_stop(host->mmc);
2729 mmc_retune_needed(host->mmc);
2730
2731 spin_lock_irqsave(&host->lock, flags);
2732 host->ier &= SDHCI_INT_CARD_INT;
2733 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2734 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2735 spin_unlock_irqrestore(&host->lock, flags);
2736
2737 synchronize_hardirq(host->irq);
2738
2739 spin_lock_irqsave(&host->lock, flags);
2740 host->runtime_suspended = true;
2741 spin_unlock_irqrestore(&host->lock, flags);
2742
2743 return 0;
2744 }
2745 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2746
2747 int sdhci_runtime_resume_host(struct sdhci_host *host)
2748 {
2749 unsigned long flags;
2750 int host_flags = host->flags;
2751
2752 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2753 if (host->ops->enable_dma)
2754 host->ops->enable_dma(host);
2755 }
2756
2757 sdhci_init(host, 0);
2758
2759 /* Force clock and power re-program */
2760 host->pwr = 0;
2761 host->clock = 0;
2762 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
2763 sdhci_do_set_ios(host, &host->mmc->ios);
2764
2765 if ((host_flags & SDHCI_PV_ENABLED) &&
2766 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2767 spin_lock_irqsave(&host->lock, flags);
2768 sdhci_enable_preset_value(host, true);
2769 spin_unlock_irqrestore(&host->lock, flags);
2770 }
2771
2772 spin_lock_irqsave(&host->lock, flags);
2773
2774 host->runtime_suspended = false;
2775
2776 /* Enable SDIO IRQ */
2777 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2778 sdhci_enable_sdio_irq_nolock(host, true);
2779
2780 /* Enable Card Detection */
2781 sdhci_enable_card_detection(host);
2782
2783 spin_unlock_irqrestore(&host->lock, flags);
2784
2785 return 0;
2786 }
2787 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
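
/*
 * Likewise for runtime PM, again with hypothetical "foo" names; a
 * real glue driver would plug these in via SET_RUNTIME_PM_OPS() and
 * enable runtime PM from probe():
 */
static int foo_runtime_suspend(struct device *dev)
{
	return sdhci_runtime_suspend_host(dev_get_drvdata(dev));
}

static int foo_runtime_resume(struct device *dev)
{
	return sdhci_runtime_resume_host(dev_get_drvdata(dev));
}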
2788
2789 #endif /* CONFIG_PM */
2790
2791 /*****************************************************************************\
2792 * *
2793 * Device allocation/registration *
2794 * *
2795 \*****************************************************************************/
2796
2797 struct sdhci_host *sdhci_alloc_host(struct device *dev,
2798 size_t priv_size)
2799 {
2800 struct mmc_host *mmc;
2801 struct sdhci_host *host;
2802
2803 WARN_ON(dev == NULL);
2804
2805 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
2806 if (!mmc)
2807 return ERR_PTR(-ENOMEM);
2808
2809 host = mmc_priv(mmc);
2810 host->mmc = mmc;
2811 host->mmc_host_ops = sdhci_ops;
2812 mmc->ops = &host->mmc_host_ops;
2813
2814 return host;
2815 }
2816
2817 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
2818
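/*
 * A minimal sketch of the usual probe-time sequence, assuming a
 * hypothetical platform driver "foo" (error unwinding omitted and
 * <linux/platform_device.h> included); foo_sdhci_ops stands in for
 * the driver's real ops table:
 */
static int foo_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;

	host = sdhci_alloc_host(&pdev->dev, 0);
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->hw_name = "foo";
	host->ops = &foo_sdhci_ops;
	host->irq = platform_get_irq(pdev, 0);
	/* host->ioaddr = devm_ioremap_resource(&pdev->dev, ...); */

	platform_set_drvdata(pdev, host);

	return sdhci_add_host(host);
}
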
2819 static int sdhci_set_dma_mask(struct sdhci_host *host)
2820 {
2821 struct mmc_host *mmc = host->mmc;
2822 struct device *dev = mmc_dev(mmc);
2823 int ret = -EINVAL;
2824
2825 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
2826 host->flags &= ~SDHCI_USE_64_BIT_DMA;
2827
2828 /* Try 64-bit mask if hardware is capable of it */
2829 if (host->flags & SDHCI_USE_64_BIT_DMA) {
2830 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
2831 if (ret) {
2832 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
2833 mmc_hostname(mmc));
2834 host->flags &= ~SDHCI_USE_64_BIT_DMA;
2835 }
2836 }
2837
2838 /* 32-bit mask as default & fallback */
2839 if (ret) {
2840 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
2841 if (ret)
2842 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
2843 mmc_hostname(mmc));
2844 }
2845
2846 return ret;
2847 }
2848
2849 int sdhci_add_host(struct sdhci_host *host)
2850 {
2851 struct mmc_host *mmc;
2852 u32 caps[2] = {0, 0};
2853 u32 max_current_caps;
2854 unsigned int ocr_avail;
2855 unsigned int override_timeout_clk;
2856 u32 max_clk;
2857 int ret;
2858
2859 WARN_ON(host == NULL);
2860 if (host == NULL)
2861 return -EINVAL;
2862
2863 mmc = host->mmc;
2864
2865 if (debug_quirks)
2866 host->quirks = debug_quirks;
2867 if (debug_quirks2)
2868 host->quirks2 = debug_quirks2;
2869
2870 override_timeout_clk = host->timeout_clk;
2871
2872 sdhci_do_reset(host, SDHCI_RESET_ALL);
2873
2874 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
2875 host->version = (host->version & SDHCI_SPEC_VER_MASK)
2876 >> SDHCI_SPEC_VER_SHIFT;
2877 if (host->version > SDHCI_SPEC_300) {
2878 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
2879 mmc_hostname(mmc), host->version);
2880 }
2881
2882 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
2883 sdhci_readl(host, SDHCI_CAPABILITIES);
2884
2885 if (host->version >= SDHCI_SPEC_300)
2886 caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
2887 host->caps1 :
2888 sdhci_readl(host, SDHCI_CAPABILITIES_1);
2889
2890 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
2891 host->flags |= SDHCI_USE_SDMA;
2892 else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
2893 DBG("Controller doesn't have SDMA capability\n");
2894 else
2895 host->flags |= SDHCI_USE_SDMA;
2896
2897 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
2898 (host->flags & SDHCI_USE_SDMA)) {
2899 DBG("Disabling DMA as it is marked broken\n");
2900 host->flags &= ~SDHCI_USE_SDMA;
2901 }
2902
2903 if ((host->version >= SDHCI_SPEC_200) &&
2904 (caps[0] & SDHCI_CAN_DO_ADMA2))
2905 host->flags |= SDHCI_USE_ADMA;
2906
2907 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
2908 (host->flags & SDHCI_USE_ADMA)) {
2909 DBG("Disabling ADMA as it is marked broken\n");
2910 host->flags &= ~SDHCI_USE_ADMA;
2911 }
2912
2913 /*
2914 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
2915 * and *must* do 64-bit DMA. A driver has the opportunity to change
2916 * that during the first call to ->enable_dma(). Similarly
2917 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
2918 * implement.
2919 */
2920 if (caps[0] & SDHCI_CAN_64BIT)
2921 host->flags |= SDHCI_USE_64_BIT_DMA;
2922
2923 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2924 ret = sdhci_set_dma_mask(host);
2925
2926 if (!ret && host->ops->enable_dma)
2927 ret = host->ops->enable_dma(host);
2928
2929 if (ret) {
2930 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
2931 mmc_hostname(mmc));
2932 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
2933
2934 ret = 0;
2935 }
2936 }
2937
2938 /* SDMA does not support 64-bit DMA */
2939 if (host->flags & SDHCI_USE_64_BIT_DMA)
2940 host->flags &= ~SDHCI_USE_SDMA;
2941
2942 if (host->flags & SDHCI_USE_ADMA) {
2943 dma_addr_t dma;
2944 void *buf;
2945
2946 /*
2947 * The DMA descriptor table size is calculated as the maximum
2948 * number of segments times 2, to allow for an alignment
2949 * descriptor for each segment, plus 1 for a nop end descriptor,
2950 * all multiplied by the descriptor size.
2951 */
2952 if (host->flags & SDHCI_USE_64_BIT_DMA) {
2953 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
2954 SDHCI_ADMA2_64_DESC_SZ;
2955 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
2956 } else {
2957 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
2958 SDHCI_ADMA2_32_DESC_SZ;
2959 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
2960 }
2961
2962 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
2963 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
2964 host->adma_table_sz, &dma, GFP_KERNEL);
2965 if (!buf) {
2966 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2967 mmc_hostname(mmc));
2968 host->flags &= ~SDHCI_USE_ADMA;
2969 } else if ((dma + host->align_buffer_sz) &
2970 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
2971 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
2972 mmc_hostname(mmc));
2973 host->flags &= ~SDHCI_USE_ADMA;
2974 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
2975 host->adma_table_sz, buf, dma);
2976 } else {
2977 host->align_buffer = buf;
2978 host->align_addr = dma;
2979
2980 host->adma_table = buf + host->align_buffer_sz;
2981 host->adma_addr = dma + host->align_buffer_sz;
2982 }
2983 }
2984
2985 /*
2986 * If we use DMA, then it's up to the caller to set the DMA
2987 * mask, but PIO does not need the hw shim so we set a new
2988 * mask here in that case.
2989 */
2990 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
2991 host->dma_mask = DMA_BIT_MASK(64);
2992 mmc_dev(mmc)->dma_mask = &host->dma_mask;
2993 }
2994
2995 if (host->version >= SDHCI_SPEC_300)
2996 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
2997 >> SDHCI_CLOCK_BASE_SHIFT;
2998 else
2999 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
3000 >> SDHCI_CLOCK_BASE_SHIFT;
3001
3002 host->max_clk *= 1000000;
3003 if (host->max_clk == 0 || host->quirks &
3004 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3005 if (!host->ops->get_max_clock) {
3006 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3007 mmc_hostname(mmc));
3008 ret = -ENODEV;
3009 goto undma;
3010 }
3011 host->max_clk = host->ops->get_max_clock(host);
3012 }
3013
3014 /*
3015 * In case of Host Controller v3.00, find out whether clock
3016 * multiplier is supported.
3017 */
3018 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
3019 SDHCI_CLOCK_MUL_SHIFT;
3020
3021 /*
3022 * If the value in the Clock Multiplier field is 0, programmable
3023 * clock mode is not supported; otherwise the actual clock
3024 * multiplier is one more than the value of Clock Multiplier
3025 * in the Capabilities Register.
3026 */
3027 if (host->clk_mul)
3028 host->clk_mul += 1;
3029
3030 /*
3031 * Set host parameters.
3032 */
3033 max_clk = host->max_clk;
3034
3035 if (host->ops->get_min_clock)
3036 mmc->f_min = host->ops->get_min_clock(host);
3037 else if (host->version >= SDHCI_SPEC_300) {
3038 if (host->clk_mul) {
3039 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3040 max_clk = host->max_clk * host->clk_mul;
3041 } else
3042 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3043 } else
3044 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3045
3046 if (!mmc->f_max || mmc->f_max > max_clk)
3047 mmc->f_max = max_clk;
3048
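/*
 * Worked example: with a 100 MHz base clock and a Clock Multiplier
 * field of 7, clk_mul becomes 8, so f_max can reach 800 MHz while
 * f_min is (100 MHz * 8) / 1024, roughly 781 kHz.
 */
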
3049 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3050 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
3051 SDHCI_TIMEOUT_CLK_SHIFT;
3052 if (host->timeout_clk == 0) {
3053 if (host->ops->get_timeout_clock) {
3054 host->timeout_clk =
3055 host->ops->get_timeout_clock(host);
3056 } else {
3057 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3058 mmc_hostname(mmc));
3059 ret = -ENODEV;
3060 goto undma;
3061 }
3062 }
3063
3064 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
3065 host->timeout_clk *= 1000;
3066
3067 if (override_timeout_clk)
3068 host->timeout_clk = override_timeout_clk;
3069
3070 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3071 host->ops->get_max_timeout_count(host) : 1 << 27;
3072 mmc->max_busy_timeout /= host->timeout_clk;
3073 }
3074
3075 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3076 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3077
3078 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3079 host->flags |= SDHCI_AUTO_CMD12;
3080
3081 /* Auto-CMD23 stuff only works in ADMA or PIO. */
3082 if ((host->version >= SDHCI_SPEC_300) &&
3083 ((host->flags & SDHCI_USE_ADMA) ||
3084 !(host->flags & SDHCI_USE_SDMA)) &&
3085 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3086 host->flags |= SDHCI_AUTO_CMD23;
3087 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
3088 } else {
3089 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
3090 }
3091
3092 /*
3093 * A controller may support 8-bit width, but the board itself
3094 * might not have the pins brought out. Boards that support
3095 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3096 * their platform code before calling sdhci_add_host(), and we
3097 * won't assume 8-bit width for hosts without that CAP.
3098 */
3099 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3100 mmc->caps |= MMC_CAP_4_BIT_DATA;
3101
3102 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3103 mmc->caps &= ~MMC_CAP_CMD23;
3104
3105 if (caps[0] & SDHCI_CAN_DO_HISPD)
3106 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3107
3108 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3109 !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
3110 IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
3111 mmc->caps |= MMC_CAP_NEEDS_POLL;
3112
3113 /* If there are external regulators, get them */
3114 ret = mmc_regulator_get_supply(mmc);
3115 if (ret == -EPROBE_DEFER)
3116 goto undma;
3117
3118 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3119 if (!IS_ERR(mmc->supply.vqmmc)) {
3120 ret = regulator_enable(mmc->supply.vqmmc);
3121 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3122 1950000))
3123 caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
3124 SDHCI_SUPPORT_SDR50 |
3125 SDHCI_SUPPORT_DDR50);
3126 if (ret) {
3127 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3128 mmc_hostname(mmc), ret);
3129 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3130 }
3131 }
3132
3133 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
3134 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3135 SDHCI_SUPPORT_DDR50);
3136
3137 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3138 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3139 SDHCI_SUPPORT_DDR50))
3140 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3141
3142 /* SDR104 support also implies SDR50 support */
3143 if (caps[1] & SDHCI_SUPPORT_SDR104) {
3144 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3145 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3146 * field can be promoted to support HS200.
3147 */
3148 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3149 mmc->caps2 |= MMC_CAP2_HS200;
3150 } else if (caps[1] & SDHCI_SUPPORT_SDR50)
3151 mmc->caps |= MMC_CAP_UHS_SDR50;
3152
3153 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3154 (caps[1] & SDHCI_SUPPORT_HS400))
3155 mmc->caps2 |= MMC_CAP2_HS400;
3156
3157 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3158 (IS_ERR(mmc->supply.vqmmc) ||
3159 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3160 1300000)))
3161 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3162
3163 if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
3164 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3165 mmc->caps |= MMC_CAP_UHS_DDR50;
3166
3167 /* Does the host need tuning for SDR50? */
3168 if (caps[1] & SDHCI_USE_SDR50_TUNING)
3169 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3170
3171 /* Driver Type(s) (A, C, D) supported by the host */
3172 if (caps[1] & SDHCI_DRIVER_TYPE_A)
3173 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3174 if (caps[1] & SDHCI_DRIVER_TYPE_C)
3175 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3176 if (caps[1] & SDHCI_DRIVER_TYPE_D)
3177 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3178
3179 /* Initial value for re-tuning timer count */
3180 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3181 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3182
3183 /*
3184 * In case Re-tuning Timer is not disabled, the actual value of
3185 * re-tuning timer will be 2 ^ (n - 1).
3186 */
3187 if (host->tuning_count)
3188 host->tuning_count = 1 << (host->tuning_count - 1);
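
/*
 * Worked example: a Re-tuning Timer Count field of 4 yields
 * 2 ^ (4 - 1) = 8, i.e. re-tuning roughly every 8 seconds.
 */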
3189
3190 /* Re-tuning mode supported by the Host Controller */
3191 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
3192 SDHCI_RETUNING_MODE_SHIFT;
3193
3194 ocr_avail = 0;
3195
3196 /*
3197 * According to SD Host Controller spec v3.00, if the Host System
3198 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3199 * the value is meaningful only if Voltage Support in the Capabilities
3200 * register is set. The actual current value is 4 times the register
3201 * value.
3202 */
3203 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3204 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3205 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3206 if (curr > 0) {
3207
3208 /* convert to SDHCI_MAX_CURRENT format */
3209 curr = curr/1000; /* convert to mA */
3210 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3211
3212 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3213 max_current_caps =
3214 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3215 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3216 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
3217 }
3218 }
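
/*
 * Worked example: a vmmc regulator limit of 1000000 uA is 1000 mA;
 * divided by the 4 mA-per-step multiplier this gives a register
 * value of 250, replicated into the 3.3 V, 3.0 V and 1.8 V fields
 * of the synthesized max-current capabilities word.
 */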
3219
3220 if (caps[0] & SDHCI_CAN_VDD_330) {
3221 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3222
3223 mmc->max_current_330 = ((max_current_caps &
3224 SDHCI_MAX_CURRENT_330_MASK) >>
3225 SDHCI_MAX_CURRENT_330_SHIFT) *
3226 SDHCI_MAX_CURRENT_MULTIPLIER;
3227 }
3228 if (caps[0] & SDHCI_CAN_VDD_300) {
3229 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3230
3231 mmc->max_current_300 = ((max_current_caps &
3232 SDHCI_MAX_CURRENT_300_MASK) >>
3233 SDHCI_MAX_CURRENT_300_SHIFT) *
3234 SDHCI_MAX_CURRENT_MULTIPLIER;
3235 }
3236 if (caps[0] & SDHCI_CAN_VDD_180) {
3237 ocr_avail |= MMC_VDD_165_195;
3238
3239 mmc->max_current_180 = ((max_current_caps &
3240 SDHCI_MAX_CURRENT_180_MASK) >>
3241 SDHCI_MAX_CURRENT_180_SHIFT) *
3242 SDHCI_MAX_CURRENT_MULTIPLIER;
3243 }
3244
3245 /* If OCR set by host, use it instead. */
3246 if (host->ocr_mask)
3247 ocr_avail = host->ocr_mask;
3248
3249 /* If OCR set by external regulators, give it highest prio. */
3250 if (mmc->ocr_avail)
3251 ocr_avail = mmc->ocr_avail;
3252
3253 mmc->ocr_avail = ocr_avail;
3254 mmc->ocr_avail_sdio = ocr_avail;
3255 if (host->ocr_avail_sdio)
3256 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3257 mmc->ocr_avail_sd = ocr_avail;
3258 if (host->ocr_avail_sd)
3259 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3260 else /* normal SD controllers don't support 1.8V */
3261 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3262 mmc->ocr_avail_mmc = ocr_avail;
3263 if (host->ocr_avail_mmc)
3264 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3265
3266 if (mmc->ocr_avail == 0) {
3267 pr_err("%s: Hardware doesn't report any support voltages.\n",
3268 mmc_hostname(mmc));
3269 ret = -ENODEV;
3270 goto unreg;
3271 }
3272
3273 spin_lock_init(&host->lock);
3274
3275 /*
3276 * Maximum number of segments. Depends on whether the hardware
3277 * can do scatter/gather or not.
3278 */
3279 if (host->flags & SDHCI_USE_ADMA)
3280 mmc->max_segs = SDHCI_MAX_SEGS;
3281 else if (host->flags & SDHCI_USE_SDMA)
3282 mmc->max_segs = 1;
3283 else /* PIO */
3284 mmc->max_segs = SDHCI_MAX_SEGS;
3285
3286 /*
3287 * Maximum number of sectors in one transfer. Limited by the SDMA
3288 * boundary size (512 KiB). Note that some tuning modes impose a
3289 * 4 MiB limit, but 512 KiB is the smaller of the two anyway.
3290 */
3291 mmc->max_req_size = 524288;
3292
3293 /*
3294 * Maximum segment size. Could be one segment with the maximum number
3295 * of bytes. When doing hardware scatter/gather, each entry cannot
3296 * be larger than 64 KiB though.
3297 */
3298 if (host->flags & SDHCI_USE_ADMA) {
3299 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3300 mmc->max_seg_size = 65535;
3301 else
3302 mmc->max_seg_size = 65536;
3303 } else {
3304 mmc->max_seg_size = mmc->max_req_size;
3305 }
3306
3307 /*
3308 * Maximum block size. This varies from controller to controller and
3309 * is specified in the capabilities register.
3310 */
3311 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3312 mmc->max_blk_size = 2;
3313 } else {
3314 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
3315 SDHCI_MAX_BLOCK_SHIFT;
3316 if (mmc->max_blk_size >= 3) {
3317 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3318 mmc_hostname(mmc));
3319 mmc->max_blk_size = 0;
3320 }
3321 }
3322
3323 mmc->max_blk_size = 512 << mmc->max_blk_size;
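
/* The encoded field thus maps 0, 1 and 2 to 512, 1024 and 2048 bytes. */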
3324
3325 /*
3326 * Maximum block count.
3327 */
3328 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3329
3330 /*
3331 * Init tasklets.
3332 */
3333 tasklet_init(&host->finish_tasklet,
3334 sdhci_tasklet_finish, (unsigned long)host);
3335
3336 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3337
3338 init_waitqueue_head(&host->buf_ready_int);
3339
3340 sdhci_init(host, 0);
3341
3342 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3343 IRQF_SHARED, mmc_hostname(mmc), host);
3344 if (ret) {
3345 pr_err("%s: Failed to request IRQ %d: %d\n",
3346 mmc_hostname(mmc), host->irq, ret);
3347 goto untasklet;
3348 }
3349
3350 #ifdef CONFIG_MMC_DEBUG
3351 sdhci_dumpregs(host);
3352 #endif
3353
3354 ret = sdhci_led_register(host);
3355 if (ret) {
3356 pr_err("%s: Failed to register LED device: %d\n",
3357 mmc_hostname(mmc), ret);
3358 goto unirq;
3359 }
3360
3361 mmiowb();
3362
3363 ret = mmc_add_host(mmc);
3364 if (ret)
3365 goto unled;
3366
3367 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3368 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3369 (host->flags & SDHCI_USE_ADMA) ?
3370 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3371 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3372
3373 sdhci_enable_card_detection(host);
3374
3375 return 0;
3376
3377 unled:
3378 sdhci_led_unregister(host);
3379 unirq:
3380 sdhci_do_reset(host, SDHCI_RESET_ALL);
3381 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3382 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3383 free_irq(host->irq, host);
3384 untasklet:
3385 tasklet_kill(&host->finish_tasklet);
3386 unreg:
3387 if (!IS_ERR(mmc->supply.vqmmc))
3388 regulator_disable(mmc->supply.vqmmc);
3389 undma:
3390 if (host->align_buffer)
3391 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3392 host->adma_table_sz, host->align_buffer,
3393 host->align_addr);
3394 host->adma_table = NULL;
3395 host->align_buffer = NULL;
3396
3397 return ret;
3398 }
3399
3400 EXPORT_SYMBOL_GPL(sdhci_add_host);
3401
3402 void sdhci_remove_host(struct sdhci_host *host, int dead)
3403 {
3404 struct mmc_host *mmc = host->mmc;
3405 unsigned long flags;
3406
3407 if (dead) {
3408 spin_lock_irqsave(&host->lock, flags);
3409
3410 host->flags |= SDHCI_DEVICE_DEAD;
3411
3412 if (host->mrq) {
3413 pr_err("%s: Controller removed during "
3414 " transfer!\n", mmc_hostname(mmc));
3415
3416 host->mrq->cmd->error = -ENOMEDIUM;
3417 tasklet_schedule(&host->finish_tasklet);
3418 }
3419
3420 spin_unlock_irqrestore(&host->lock, flags);
3421 }
3422
3423 sdhci_disable_card_detection(host);
3424
3425 mmc_remove_host(mmc);
3426
3427 sdhci_led_unregister(host);
3428
3429 if (!dead)
3430 sdhci_do_reset(host, SDHCI_RESET_ALL);
3431
3432 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3433 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3434 free_irq(host->irq, host);
3435
3436 del_timer_sync(&host->timer);
3437
3438 tasklet_kill(&host->finish_tasklet);
3439
3440 if (!IS_ERR(mmc->supply.vqmmc))
3441 regulator_disable(mmc->supply.vqmmc);
3442
3443 if (host->align_buffer)
3444 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3445 host->adma_table_sz, host->align_buffer,
3446 host->align_addr);
3447
3448 host->adma_table = NULL;
3449 host->align_buffer = NULL;
3450 }
3451
3452 EXPORT_SYMBOL_GPL(sdhci_remove_host);
3453
3454 void sdhci_free_host(struct sdhci_host *host)
3455 {
3456 mmc_free_host(host->mmc);
3457 }
3458
3459 EXPORT_SYMBOL_GPL(sdhci_free_host);
3460
3461 /*****************************************************************************\
3462 * *
3463 * Driver init/exit *
3464 * *
3465 \*****************************************************************************/
3466
3467 static int __init sdhci_drv_init(void)
3468 {
3469 pr_info(DRIVER_NAME
3470 ": Secure Digital Host Controller Interface driver\n");
3471 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
3472
3473 return 0;
3474 }
3475
3476 static void __exit sdhci_drv_exit(void)
3477 {
3478 }
3479
3480 module_init(sdhci_drv_init);
3481 module_exit(sdhci_drv_exit);
3482
3483 module_param(debug_quirks, uint, 0444);
3484 module_param(debug_quirks2, uint, 0444);
3485
3486 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3487 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3488 MODULE_LICENSE("GPL");
3489
3490 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3491 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");