/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"
#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
	       mmc_hostname(host->mmc));

	pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
	       sdhci_readl(host, SDHCI_DMA_ADDRESS),
	       sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
	       sdhci_readw(host, SDHCI_BLOCK_SIZE),
	       sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
	       sdhci_readl(host, SDHCI_ARGUMENT),
	       sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_err(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
	       sdhci_readl(host, SDHCI_PRESENT_STATE),
	       sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_err(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
	       sdhci_readb(host, SDHCI_POWER_CONTROL),
	       sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_err(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
	       sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
	       sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_err(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
	       sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
	       sdhci_readl(host, SDHCI_INT_STATUS));
	pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
	       sdhci_readl(host, SDHCI_INT_ENABLE),
	       sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
	       sdhci_readw(host, SDHCI_ACMD12_ERR),
	       sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_err(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
	       sdhci_readl(host, SDHCI_CAPABILITIES),
	       sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_err(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
	       sdhci_readw(host, SDHCI_COMMAND),
	       sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
	       sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
			       readl(host->ioaddr + SDHCI_ADMA_ERROR),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
			       readl(host->ioaddr + SDHCI_ADMA_ERROR),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_err(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;

		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}
static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			      data->flags & MMC_DATA_WRITE ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
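
/*
 * Illustrative note (not from the original source): an ADMA2 descriptor as
 * written above packs a 16-bit attribute word (valid/end/int bits plus the
 * action, e.g. ADMA2_TRAN_VALID), a 16-bit length and a 32-bit address; the
 * 64-bit variant appends an upper address word. For example, describing a
 * 512-byte transfer at DMA address 0x10000000 would yield, little-endian:
 * cmd = ADMA2_TRAN_VALID, len = 512, addr_lo = 0x10000000 (addr_hi = 0).
 */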
static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
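
/*
 * Worked example (illustrative, not part of the original source): with
 * host->timeout_clk = 48000 (kHz) the base step is (1 << 13) * 1000 / 48000
 * ~= 170 us. For a 500 ms busy timeout (target_timeout = 500000 us) the loop
 * above doubles 170 us twelve times (170 us * 2^12 ~= 696 ms >= 500 ms), so
 * the function returns a count of 12, i.e. a hardware timeout of TMCLK * 2^25.
 */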
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
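
/*
 * Illustrative note (not from the original source): SDHCI_MAKE_BLKSZ() packs
 * the SDMA buffer boundary into bits 14:12 and the block size into bits 11:0
 * of the Block Size register. With the default 512K boundary argument (7) and
 * a 512-byte block, the value written above is (7 << 12) | 512 = 0x7200.
 */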
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}
static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	int i;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0; i < 4; i++) {
				cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					cmd->resp[i] |= sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}
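
/*
 * Illustrative note (not from the original source): for a 136-bit (R2)
 * response the controller stores bits 127:8 of the response (the CRC byte is
 * stripped), so each 32-bit RESPONSE word read above is shifted left by 8 and
 * the missing low byte is taken from the preceding register byte, except for
 * resp[3], whose low byte has no source in the register file and stays zero.
 */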
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
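
/*
 * Worked example (illustrative, not part of the original source): for a
 * v3.00 host with max_clk = 200 MHz, no programmable clock (clk_mul == 0)
 * and a requested clock of 50 MHz, the divided-clock loop above stops at
 * div = 4 (200 MHz / 4 = 50 MHz), so real_div = 4, the value placed in the
 * divider field is div >> 1 = 2, and actual_clock comes out as 50 MHz.
 */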
1346 void sdhci_set_clock(struct sdhci_host
*host
, unsigned int clock
)
1349 unsigned long timeout
;
1351 host
->mmc
->actual_clock
= 0;
1353 sdhci_writew(host
, 0, SDHCI_CLOCK_CONTROL
);
1358 clk
= sdhci_calc_clk(host
, clock
, &host
->mmc
->actual_clock
);
1360 clk
|= SDHCI_CLOCK_INT_EN
;
1361 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
1363 /* Wait max 20 ms */
1365 while (!((clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
))
1366 & SDHCI_CLOCK_INT_STABLE
)) {
1368 pr_err("%s: Internal clock never stabilised.\n",
1369 mmc_hostname(host
->mmc
));
1370 sdhci_dumpregs(host
);
1377 clk
|= SDHCI_CLOCK_CARD_EN
;
1378 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
1380 EXPORT_SYMBOL_GPL(sdhci_set_clock
);
1382 static void sdhci_set_power_reg(struct sdhci_host
*host
, unsigned char mode
,
1385 struct mmc_host
*mmc
= host
->mmc
;
1387 spin_unlock_irq(&host
->lock
);
1388 mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
, vdd
);
1389 spin_lock_irq(&host
->lock
);
1391 if (mode
!= MMC_POWER_OFF
)
1392 sdhci_writeb(host
, SDHCI_POWER_ON
, SDHCI_POWER_CONTROL
);
1394 sdhci_writeb(host
, 0, SDHCI_POWER_CONTROL
);
1397 void sdhci_set_power_noreg(struct sdhci_host
*host
, unsigned char mode
,
1402 if (mode
!= MMC_POWER_OFF
) {
1404 case MMC_VDD_165_195
:
1405 pwr
= SDHCI_POWER_180
;
1409 pwr
= SDHCI_POWER_300
;
1413 pwr
= SDHCI_POWER_330
;
1416 WARN(1, "%s: Invalid vdd %#x\n",
1417 mmc_hostname(host
->mmc
), vdd
);
1422 if (host
->pwr
== pwr
)
1428 sdhci_writeb(host
, 0, SDHCI_POWER_CONTROL
);
1429 if (host
->quirks2
& SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON
)
1430 sdhci_runtime_pm_bus_off(host
);
1433 * Spec says that we should clear the power reg before setting
1434 * a new value. Some controllers don't seem to like this though.
1436 if (!(host
->quirks
& SDHCI_QUIRK_SINGLE_POWER_WRITE
))
1437 sdhci_writeb(host
, 0, SDHCI_POWER_CONTROL
);
1440 * At least the Marvell CaFe chip gets confused if we set the
1441 * voltage and set turn on power at the same time, so set the
1444 if (host
->quirks
& SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER
)
1445 sdhci_writeb(host
, pwr
, SDHCI_POWER_CONTROL
);
1447 pwr
|= SDHCI_POWER_ON
;
1449 sdhci_writeb(host
, pwr
, SDHCI_POWER_CONTROL
);
1451 if (host
->quirks2
& SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON
)
1452 sdhci_runtime_pm_bus_on(host
);
1455 * Some controllers need an extra 10ms delay of 10ms before
1456 * they can apply clock after applying power
1458 if (host
->quirks
& SDHCI_QUIRK_DELAY_AFTER_POWER
)
1462 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg
);
1464 void sdhci_set_power(struct sdhci_host
*host
, unsigned char mode
,
1467 if (IS_ERR(host
->mmc
->supply
.vmmc
))
1468 sdhci_set_power_noreg(host
, mode
, vdd
);
1470 sdhci_set_power_reg(host
, mode
, vdd
);
1472 EXPORT_SYMBOL_GPL(sdhci_set_power
);
1474 /*****************************************************************************\
1478 \*****************************************************************************/
1480 static void sdhci_request(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
1482 struct sdhci_host
*host
;
1484 unsigned long flags
;
1486 host
= mmc_priv(mmc
);
1488 /* Firstly check card presence */
1489 present
= mmc
->ops
->get_cd(mmc
);
1491 spin_lock_irqsave(&host
->lock
, flags
);
1493 sdhci_led_activate(host
);
1496 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1497 * requests if Auto-CMD12 is enabled.
1499 if (sdhci_auto_cmd12(host
, mrq
)) {
1501 mrq
->data
->stop
= NULL
;
1506 if (!present
|| host
->flags
& SDHCI_DEVICE_DEAD
) {
1507 mrq
->cmd
->error
= -ENOMEDIUM
;
1508 sdhci_finish_mrq(host
, mrq
);
1510 if (mrq
->sbc
&& !(host
->flags
& SDHCI_AUTO_CMD23
))
1511 sdhci_send_command(host
, mrq
->sbc
);
1513 sdhci_send_command(host
, mrq
->cmd
);
1517 spin_unlock_irqrestore(&host
->lock
, flags
);
1520 void sdhci_set_bus_width(struct sdhci_host
*host
, int width
)
1524 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
1525 if (width
== MMC_BUS_WIDTH_8
) {
1526 ctrl
&= ~SDHCI_CTRL_4BITBUS
;
1527 if (host
->version
>= SDHCI_SPEC_300
)
1528 ctrl
|= SDHCI_CTRL_8BITBUS
;
1530 if (host
->version
>= SDHCI_SPEC_300
)
1531 ctrl
&= ~SDHCI_CTRL_8BITBUS
;
1532 if (width
== MMC_BUS_WIDTH_4
)
1533 ctrl
|= SDHCI_CTRL_4BITBUS
;
1535 ctrl
&= ~SDHCI_CTRL_4BITBUS
;
1537 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
1539 EXPORT_SYMBOL_GPL(sdhci_set_bus_width
);
1541 void sdhci_set_uhs_signaling(struct sdhci_host
*host
, unsigned timing
)
1545 ctrl_2
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1546 /* Select Bus Speed Mode for host */
1547 ctrl_2
&= ~SDHCI_CTRL_UHS_MASK
;
1548 if ((timing
== MMC_TIMING_MMC_HS200
) ||
1549 (timing
== MMC_TIMING_UHS_SDR104
))
1550 ctrl_2
|= SDHCI_CTRL_UHS_SDR104
;
1551 else if (timing
== MMC_TIMING_UHS_SDR12
)
1552 ctrl_2
|= SDHCI_CTRL_UHS_SDR12
;
1553 else if (timing
== MMC_TIMING_UHS_SDR25
)
1554 ctrl_2
|= SDHCI_CTRL_UHS_SDR25
;
1555 else if (timing
== MMC_TIMING_UHS_SDR50
)
1556 ctrl_2
|= SDHCI_CTRL_UHS_SDR50
;
1557 else if ((timing
== MMC_TIMING_UHS_DDR50
) ||
1558 (timing
== MMC_TIMING_MMC_DDR52
))
1559 ctrl_2
|= SDHCI_CTRL_UHS_DDR50
;
1560 else if (timing
== MMC_TIMING_MMC_HS400
)
1561 ctrl_2
|= SDHCI_CTRL_HS400
; /* Non-standard */
1562 sdhci_writew(host
, ctrl_2
, SDHCI_HOST_CONTROL2
);
1564 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling
);
1566 static void sdhci_set_ios(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
1568 struct sdhci_host
*host
= mmc_priv(mmc
);
1569 unsigned long flags
;
1572 spin_lock_irqsave(&host
->lock
, flags
);
1574 if (host
->flags
& SDHCI_DEVICE_DEAD
) {
1575 spin_unlock_irqrestore(&host
->lock
, flags
);
1576 if (!IS_ERR(mmc
->supply
.vmmc
) &&
1577 ios
->power_mode
== MMC_POWER_OFF
)
1578 mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
, 0);
1583 * Reset the chip on each power off.
1584 * Should clear out any weird states.
1586 if (ios
->power_mode
== MMC_POWER_OFF
) {
1587 sdhci_writel(host
, 0, SDHCI_SIGNAL_ENABLE
);
1591 if (host
->version
>= SDHCI_SPEC_300
&&
1592 (ios
->power_mode
== MMC_POWER_UP
) &&
1593 !(host
->quirks2
& SDHCI_QUIRK2_PRESET_VALUE_BROKEN
))
1594 sdhci_enable_preset_value(host
, false);
1596 if (!ios
->clock
|| ios
->clock
!= host
->clock
) {
1597 host
->ops
->set_clock(host
, ios
->clock
);
1598 host
->clock
= ios
->clock
;
1600 if (host
->quirks
& SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
&&
1602 host
->timeout_clk
= host
->mmc
->actual_clock
?
1603 host
->mmc
->actual_clock
/ 1000 :
1605 host
->mmc
->max_busy_timeout
=
1606 host
->ops
->get_max_timeout_count
?
1607 host
->ops
->get_max_timeout_count(host
) :
1609 host
->mmc
->max_busy_timeout
/= host
->timeout_clk
;
1613 if (host
->ops
->set_power
)
1614 host
->ops
->set_power(host
, ios
->power_mode
, ios
->vdd
);
1616 sdhci_set_power(host
, ios
->power_mode
, ios
->vdd
);
1618 if (host
->ops
->platform_send_init_74_clocks
)
1619 host
->ops
->platform_send_init_74_clocks(host
, ios
->power_mode
);
1621 host
->ops
->set_bus_width(host
, ios
->bus_width
);
1623 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
1625 if ((ios
->timing
== MMC_TIMING_SD_HS
||
1626 ios
->timing
== MMC_TIMING_MMC_HS
)
1627 && !(host
->quirks
& SDHCI_QUIRK_NO_HISPD_BIT
))
1628 ctrl
|= SDHCI_CTRL_HISPD
;
1630 ctrl
&= ~SDHCI_CTRL_HISPD
;
1632 if (host
->version
>= SDHCI_SPEC_300
) {
1635 /* In case of UHS-I modes, set High Speed Enable */
1636 if ((ios
->timing
== MMC_TIMING_MMC_HS400
) ||
1637 (ios
->timing
== MMC_TIMING_MMC_HS200
) ||
1638 (ios
->timing
== MMC_TIMING_MMC_DDR52
) ||
1639 (ios
->timing
== MMC_TIMING_UHS_SDR50
) ||
1640 (ios
->timing
== MMC_TIMING_UHS_SDR104
) ||
1641 (ios
->timing
== MMC_TIMING_UHS_DDR50
) ||
1642 (ios
->timing
== MMC_TIMING_UHS_SDR25
))
1643 ctrl
|= SDHCI_CTRL_HISPD
;
1645 if (!host
->preset_enabled
) {
1646 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
1648 * We only need to set Driver Strength if the
1649 * preset value enable is not set.
1651 ctrl_2
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1652 ctrl_2
&= ~SDHCI_CTRL_DRV_TYPE_MASK
;
1653 if (ios
->drv_type
== MMC_SET_DRIVER_TYPE_A
)
1654 ctrl_2
|= SDHCI_CTRL_DRV_TYPE_A
;
1655 else if (ios
->drv_type
== MMC_SET_DRIVER_TYPE_B
)
1656 ctrl_2
|= SDHCI_CTRL_DRV_TYPE_B
;
1657 else if (ios
->drv_type
== MMC_SET_DRIVER_TYPE_C
)
1658 ctrl_2
|= SDHCI_CTRL_DRV_TYPE_C
;
1659 else if (ios
->drv_type
== MMC_SET_DRIVER_TYPE_D
)
1660 ctrl_2
|= SDHCI_CTRL_DRV_TYPE_D
;
1662 pr_warn("%s: invalid driver type, default to driver type B\n",
1664 ctrl_2
|= SDHCI_CTRL_DRV_TYPE_B
;
1667 sdhci_writew(host
, ctrl_2
, SDHCI_HOST_CONTROL2
);
1670 * According to SDHC Spec v3.00, if the Preset Value
1671 * Enable in the Host Control 2 register is set, we
1672 * need to reset SD Clock Enable before changing High
1673 * Speed Enable to avoid generating clock gliches.
1676 /* Reset SD Clock Enable */
1677 clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
);
1678 clk
&= ~SDHCI_CLOCK_CARD_EN
;
1679 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
1681 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
1683 /* Re-enable SD Clock */
1684 host
->ops
->set_clock(host
, host
->clock
);
1687 /* Reset SD Clock Enable */
1688 clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
);
1689 clk
&= ~SDHCI_CLOCK_CARD_EN
;
1690 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
1692 host
->ops
->set_uhs_signaling(host
, ios
->timing
);
1693 host
->timing
= ios
->timing
;
1695 if (!(host
->quirks2
& SDHCI_QUIRK2_PRESET_VALUE_BROKEN
) &&
1696 ((ios
->timing
== MMC_TIMING_UHS_SDR12
) ||
1697 (ios
->timing
== MMC_TIMING_UHS_SDR25
) ||
1698 (ios
->timing
== MMC_TIMING_UHS_SDR50
) ||
1699 (ios
->timing
== MMC_TIMING_UHS_SDR104
) ||
1700 (ios
->timing
== MMC_TIMING_UHS_DDR50
) ||
1701 (ios
->timing
== MMC_TIMING_MMC_DDR52
))) {
1704 sdhci_enable_preset_value(host
, true);
1705 preset
= sdhci_get_preset_value(host
);
1706 ios
->drv_type
= (preset
& SDHCI_PRESET_DRV_MASK
)
1707 >> SDHCI_PRESET_DRV_SHIFT
;
1710 /* Re-enable SD Clock */
1711 host
->ops
->set_clock(host
, host
->clock
);
1713 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
1716 * Some (ENE) controllers go apeshit on some ios operation,
1717 * signalling timeout and CRC errors even on CMD0. Resetting
1718 * it on each ios seems to solve the problem.
1720 if (host
->quirks
& SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS
)
1721 sdhci_do_reset(host
, SDHCI_RESET_CMD
| SDHCI_RESET_DATA
);
1724 spin_unlock_irqrestore(&host
->lock
, flags
);
1727 static int sdhci_get_cd(struct mmc_host
*mmc
)
1729 struct sdhci_host
*host
= mmc_priv(mmc
);
1730 int gpio_cd
= mmc_gpio_get_cd(mmc
);
1732 if (host
->flags
& SDHCI_DEVICE_DEAD
)
1735 /* If nonremovable, assume that the card is always present. */
1736 if (!mmc_card_is_removable(host
->mmc
))
1740 * Try slot gpio detect, if defined it take precedence
1741 * over build in controller functionality
1746 /* If polling, assume that the card is always present. */
1747 if (host
->quirks
& SDHCI_QUIRK_BROKEN_CARD_DETECTION
)
1750 /* Host native card detect */
1751 return !!(sdhci_readl(host
, SDHCI_PRESENT_STATE
) & SDHCI_CARD_PRESENT
);
1754 static int sdhci_check_ro(struct sdhci_host
*host
)
1756 unsigned long flags
;
1759 spin_lock_irqsave(&host
->lock
, flags
);
1761 if (host
->flags
& SDHCI_DEVICE_DEAD
)
1763 else if (host
->ops
->get_ro
)
1764 is_readonly
= host
->ops
->get_ro(host
);
1766 is_readonly
= !(sdhci_readl(host
, SDHCI_PRESENT_STATE
)
1767 & SDHCI_WRITE_PROTECT
);
1769 spin_unlock_irqrestore(&host
->lock
, flags
);
1771 /* This quirk needs to be replaced by a callback-function later */
1772 return host
->quirks
& SDHCI_QUIRK_INVERTED_WRITE_PROTECT
?
1773 !is_readonly
: is_readonly
;
1776 #define SAMPLE_COUNT 5
1778 static int sdhci_get_ro(struct mmc_host
*mmc
)
1780 struct sdhci_host
*host
= mmc_priv(mmc
);
1783 if (!(host
->quirks
& SDHCI_QUIRK_UNSTABLE_RO_DETECT
))
1784 return sdhci_check_ro(host
);
1787 for (i
= 0; i
< SAMPLE_COUNT
; i
++) {
1788 if (sdhci_check_ro(host
)) {
1789 if (++ro_count
> SAMPLE_COUNT
/ 2)
1797 static void sdhci_hw_reset(struct mmc_host
*mmc
)
1799 struct sdhci_host
*host
= mmc_priv(mmc
);
1801 if (host
->ops
&& host
->ops
->hw_reset
)
1802 host
->ops
->hw_reset(host
);
1805 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host
*host
, int enable
)
1807 if (!(host
->flags
& SDHCI_DEVICE_DEAD
)) {
1809 host
->ier
|= SDHCI_INT_CARD_INT
;
1811 host
->ier
&= ~SDHCI_INT_CARD_INT
;
1813 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
1814 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
1819 static void sdhci_enable_sdio_irq(struct mmc_host
*mmc
, int enable
)
1821 struct sdhci_host
*host
= mmc_priv(mmc
);
1822 unsigned long flags
;
1824 spin_lock_irqsave(&host
->lock
, flags
);
1826 host
->flags
|= SDHCI_SDIO_IRQ_ENABLED
;
1828 host
->flags
&= ~SDHCI_SDIO_IRQ_ENABLED
;
1830 sdhci_enable_sdio_irq_nolock(host
, enable
);
1831 spin_unlock_irqrestore(&host
->lock
, flags
);
1834 static int sdhci_start_signal_voltage_switch(struct mmc_host
*mmc
,
1835 struct mmc_ios
*ios
)
1837 struct sdhci_host
*host
= mmc_priv(mmc
);
1842 * Signal Voltage Switching is only applicable for Host Controllers
1845 if (host
->version
< SDHCI_SPEC_300
)
1848 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1850 switch (ios
->signal_voltage
) {
1851 case MMC_SIGNAL_VOLTAGE_330
:
1852 if (!(host
->flags
& SDHCI_SIGNALING_330
))
1854 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1855 ctrl
&= ~SDHCI_CTRL_VDD_180
;
1856 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
1858 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
1859 ret
= mmc_regulator_set_vqmmc(mmc
, ios
);
1861 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1867 usleep_range(5000, 5500);
1869 /* 3.3V regulator output should be stable within 5 ms */
1870 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1871 if (!(ctrl
& SDHCI_CTRL_VDD_180
))
1874 pr_warn("%s: 3.3V regulator output did not became stable\n",
1878 case MMC_SIGNAL_VOLTAGE_180
:
1879 if (!(host
->flags
& SDHCI_SIGNALING_180
))
1881 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
1882 ret
= mmc_regulator_set_vqmmc(mmc
, ios
);
1884 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1891 * Enable 1.8V Signal Enable in the Host Control2
1894 ctrl
|= SDHCI_CTRL_VDD_180
;
1895 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
1897 /* Some controller need to do more when switching */
1898 if (host
->ops
->voltage_switch
)
1899 host
->ops
->voltage_switch(host
);
1901 /* 1.8V regulator output should be stable within 5 ms */
1902 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1903 if (ctrl
& SDHCI_CTRL_VDD_180
)
1906 pr_warn("%s: 1.8V regulator output did not became stable\n",
1910 case MMC_SIGNAL_VOLTAGE_120
:
1911 if (!(host
->flags
& SDHCI_SIGNALING_120
))
1913 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
1914 ret
= mmc_regulator_set_vqmmc(mmc
, ios
);
1916 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1923 /* No signal voltage switch required */
1928 static int sdhci_card_busy(struct mmc_host
*mmc
)
1930 struct sdhci_host
*host
= mmc_priv(mmc
);
1933 /* Check whether DAT[0] is 0 */
1934 present_state
= sdhci_readl(host
, SDHCI_PRESENT_STATE
);
1936 return !(present_state
& SDHCI_DATA_0_LVL_MASK
);
1939 static int sdhci_prepare_hs400_tuning(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
1941 struct sdhci_host
*host
= mmc_priv(mmc
);
1942 unsigned long flags
;
1944 spin_lock_irqsave(&host
->lock
, flags
);
1945 host
->flags
|= SDHCI_HS400_TUNING
;
1946 spin_unlock_irqrestore(&host
->lock
, flags
);
1951 static int sdhci_execute_tuning(struct mmc_host
*mmc
, u32 opcode
)
1953 struct sdhci_host
*host
= mmc_priv(mmc
);
1955 int tuning_loop_counter
= MAX_TUNING_LOOP
;
1957 unsigned long flags
;
1958 unsigned int tuning_count
= 0;
1961 spin_lock_irqsave(&host
->lock
, flags
);
1963 hs400_tuning
= host
->flags
& SDHCI_HS400_TUNING
;
1964 host
->flags
&= ~SDHCI_HS400_TUNING
;
1966 if (host
->tuning_mode
== SDHCI_TUNING_MODE_1
)
1967 tuning_count
= host
->tuning_count
;
1970 * The Host Controller needs tuning in case of SDR104 and DDR50
1971 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
1972 * the Capabilities register.
1973 * If the Host Controller supports the HS200 mode then the
1974 * tuning function has to be executed.
1976 switch (host
->timing
) {
1977 /* HS400 tuning is done in HS200 mode */
1978 case MMC_TIMING_MMC_HS400
:
1982 case MMC_TIMING_MMC_HS200
:
1984 * Periodic re-tuning for HS400 is not expected to be needed, so
1991 case MMC_TIMING_UHS_SDR104
:
1992 case MMC_TIMING_UHS_DDR50
:
1995 case MMC_TIMING_UHS_SDR50
:
1996 if (host
->flags
& SDHCI_SDR50_NEEDS_TUNING
)
2004 if (host
->ops
->platform_execute_tuning
) {
2005 spin_unlock_irqrestore(&host
->lock
, flags
);
2006 err
= host
->ops
->platform_execute_tuning(host
, opcode
);
2010 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2011 ctrl
|= SDHCI_CTRL_EXEC_TUNING
;
2012 if (host
->quirks2
& SDHCI_QUIRK2_TUNING_WORK_AROUND
)
2013 ctrl
|= SDHCI_CTRL_TUNED_CLK
;
2014 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
2017 * As per the Host Controller spec v3.00, tuning command
2018 * generates Buffer Read Ready interrupt, so enable that.
2020 * Note: The spec clearly says that when tuning sequence
2021 * is being performed, the controller does not generate
2022 * interrupts other than Buffer Read Ready interrupt. But
2023 * to make sure we don't hit a controller bug, we _only_
2024 * enable Buffer Read Ready interrupt here.
2026 sdhci_writel(host
, SDHCI_INT_DATA_AVAIL
, SDHCI_INT_ENABLE
);
2027 sdhci_writel(host
, SDHCI_INT_DATA_AVAIL
, SDHCI_SIGNAL_ENABLE
);
2030 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
2031 * of loops reaches 40 times.
2034 struct mmc_command cmd
= {0};
2035 struct mmc_request mrq
= {NULL
};
2037 cmd
.opcode
= opcode
;
2039 cmd
.flags
= MMC_RSP_R1
| MMC_CMD_ADTC
;
2045 if (tuning_loop_counter
-- == 0)
2051 * In response to CMD19, the card sends 64 bytes of tuning
2052 * block to the Host Controller. So we set the block size
2055 if (cmd
.opcode
== MMC_SEND_TUNING_BLOCK_HS200
) {
2056 if (mmc
->ios
.bus_width
== MMC_BUS_WIDTH_8
)
2057 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(7, 128),
2059 else if (mmc
->ios
.bus_width
== MMC_BUS_WIDTH_4
)
2060 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(7, 64),
2063 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(7, 64),
2068 * The tuning block is sent by the card to the host controller.
2069 * So we set the TRNS_READ bit in the Transfer Mode register.
2070 * This also takes care of setting DMA Enable and Multi Block
2071 * Select in the same register to 0.
2073 sdhci_writew(host
, SDHCI_TRNS_READ
, SDHCI_TRANSFER_MODE
);
2075 sdhci_send_command(host
, &cmd
);
2078 sdhci_del_timer(host
, &mrq
);
2080 spin_unlock_irqrestore(&host
->lock
, flags
);
2081 /* Wait for Buffer Read Ready interrupt */
2082 wait_event_timeout(host
->buf_ready_int
,
2083 (host
->tuning_done
== 1),
2084 msecs_to_jiffies(50));
2085 spin_lock_irqsave(&host
->lock
, flags
);
2087 if (!host
->tuning_done
) {
2088 pr_info(DRIVER_NAME
": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
2090 sdhci_do_reset(host
, SDHCI_RESET_CMD
);
2091 sdhci_do_reset(host
, SDHCI_RESET_DATA
);
2093 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2094 ctrl
&= ~SDHCI_CTRL_TUNED_CLK
;
2095 ctrl
&= ~SDHCI_CTRL_EXEC_TUNING
;
2096 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
2102 host
->tuning_done
= 0;
2104 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2106 /* eMMC spec does not require a delay between tuning cycles */
2107 if (opcode
== MMC_SEND_TUNING_BLOCK
)
2109 } while (ctrl
& SDHCI_CTRL_EXEC_TUNING
);
2112 * The Host Driver has exhausted the maximum number of loops allowed,
2113 * so use fixed sampling frequency.
2115 if (tuning_loop_counter
< 0) {
2116 ctrl
&= ~SDHCI_CTRL_TUNED_CLK
;
2117 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
2119 if (!(ctrl
& SDHCI_CTRL_TUNED_CLK
)) {
2120 pr_info(DRIVER_NAME
": Tuning procedure failed, falling back to fixed sampling clock\n");
2127 * In case tuning fails, host controllers which support
2128 * re-tuning can try tuning again at a later time, when the
2129 * re-tuning timer expires. So for these controllers, we
2130 * return 0. Since there might be other controllers who do not
2131 * have this capability, we return error for them.
2136 host
->mmc
->retune_period
= err
? 0 : tuning_count
;
2138 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
2139 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
2141 spin_unlock_irqrestore(&host
->lock
, flags
);
2145 static int sdhci_select_drive_strength(struct mmc_card
*card
,
2146 unsigned int max_dtr
, int host_drv
,
2147 int card_drv
, int *drv_type
)
2149 struct sdhci_host
*host
= mmc_priv(card
->host
);
2151 if (!host
->ops
->select_drive_strength
)
2154 return host
->ops
->select_drive_strength(host
, card
, max_dtr
, host_drv
,
2155 card_drv
, drv_type
);
2158 static void sdhci_enable_preset_value(struct sdhci_host
*host
, bool enable
)
2160 /* Host Controller v3.00 defines preset value registers */
2161 if (host
->version
< SDHCI_SPEC_300
)
2165 * We only enable or disable Preset Value if they are not already
2166 * enabled or disabled respectively. Otherwise, we bail out.
2168 if (host
->preset_enabled
!= enable
) {
2169 u16 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2172 ctrl
|= SDHCI_CTRL_PRESET_VAL_ENABLE
;
2174 ctrl
&= ~SDHCI_CTRL_PRESET_VAL_ENABLE
;
2176 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
2179 host
->flags
|= SDHCI_PV_ENABLED
;
2181 host
->flags
&= ~SDHCI_PV_ENABLED
;
2183 host
->preset_enabled
= enable
;
2187 static void sdhci_post_req(struct mmc_host
*mmc
, struct mmc_request
*mrq
,
2190 struct sdhci_host
*host
= mmc_priv(mmc
);
2191 struct mmc_data
*data
= mrq
->data
;
2193 if (data
->host_cookie
!= COOKIE_UNMAPPED
)
2194 dma_unmap_sg(mmc_dev(host
->mmc
), data
->sg
, data
->sg_len
,
2195 data
->flags
& MMC_DATA_WRITE
?
2196 DMA_TO_DEVICE
: DMA_FROM_DEVICE
);
2198 data
->host_cookie
= COOKIE_UNMAPPED
;
2201 static void sdhci_pre_req(struct mmc_host
*mmc
, struct mmc_request
*mrq
,
2204 struct sdhci_host
*host
= mmc_priv(mmc
);
2206 mrq
->data
->host_cookie
= COOKIE_UNMAPPED
;
2208 if (host
->flags
& SDHCI_REQ_USE_DMA
)
2209 sdhci_pre_dma_transfer(host
, mrq
->data
, COOKIE_PRE_MAPPED
);
2212 static inline bool sdhci_has_requests(struct sdhci_host
*host
)
2214 return host
->cmd
|| host
->data_cmd
;
2217 static void sdhci_error_out_mrqs(struct sdhci_host
*host
, int err
)
2219 if (host
->data_cmd
) {
2220 host
->data_cmd
->error
= err
;
2221 sdhci_finish_mrq(host
, host
->data_cmd
->mrq
);
2225 host
->cmd
->error
= err
;
2226 sdhci_finish_mrq(host
, host
->cmd
->mrq
);
2230 static void sdhci_card_event(struct mmc_host
*mmc
)
2232 struct sdhci_host
*host
= mmc_priv(mmc
);
2233 unsigned long flags
;
2236 /* First check if client has provided their own card event */
2237 if (host
->ops
->card_event
)
2238 host
->ops
->card_event(host
);
2240 present
= mmc
->ops
->get_cd(mmc
);
2242 spin_lock_irqsave(&host
->lock
, flags
);
2244 /* Check sdhci_has_requests() first in case we are runtime suspended */
2245 if (sdhci_has_requests(host
) && !present
) {
2246 pr_err("%s: Card removed during transfer!\n",
2247 mmc_hostname(host
->mmc
));
2248 pr_err("%s: Resetting controller.\n",
2249 mmc_hostname(host
->mmc
));
2251 sdhci_do_reset(host
, SDHCI_RESET_CMD
);
2252 sdhci_do_reset(host
, SDHCI_RESET_DATA
);
2254 sdhci_error_out_mrqs(host
, -ENOMEDIUM
);
2257 spin_unlock_irqrestore(&host
->lock
, flags
);
2260 static const struct mmc_host_ops sdhci_ops
= {
2261 .request
= sdhci_request
,
2262 .post_req
= sdhci_post_req
,
2263 .pre_req
= sdhci_pre_req
,
2264 .set_ios
= sdhci_set_ios
,
2265 .get_cd
= sdhci_get_cd
,
2266 .get_ro
= sdhci_get_ro
,
2267 .hw_reset
= sdhci_hw_reset
,
2268 .enable_sdio_irq
= sdhci_enable_sdio_irq
,
2269 .start_signal_voltage_switch
= sdhci_start_signal_voltage_switch
,
2270 .prepare_hs400_tuning
= sdhci_prepare_hs400_tuning
,
2271 .execute_tuning
= sdhci_execute_tuning
,
2272 .select_drive_strength
= sdhci_select_drive_strength
,
2273 .card_event
= sdhci_card_event
,
2274 .card_busy
= sdhci_card_busy
,
2277 /*****************************************************************************\
2281 \*****************************************************************************/
static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq)
			break;
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	host->mrqs_done[i] = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);

	return false;
}
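/*
 * Illustrative summary of the host_cookie lifecycle assumed above (a sketch of
 * how this driver uses it, not a normative statement of the core API): buffers
 * start as COOKIE_UNMAPPED; sdhci_pre_req() maps them ahead of time as
 * COOKIE_PRE_MAPPED, while sdhci_prepare_data() maps on demand as
 * COOKIE_MAPPED.  Only COOKIE_MAPPED buffers are unmapped in
 * sdhci_request_done(); pre-mapped ones are handed back and unmapped later in
 * sdhci_post_req().
 */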
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	while (!sdhci_request_done(host))
		;
}
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_timeout_data_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits.  Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error.  We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		sdhci_finish_mrq(host, host->cmd->mrq);
		return;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
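/*
 * Rough decision table for the error handling above (illustrative reading of
 * the code, not additional behaviour):
 *   TIMEOUT set                 -> cmd->error = -ETIMEDOUT
 *   CRC / END_BIT / INDEX set   -> cmd->error = -EILSEQ
 *   command with a data phase and only CRC (no TIMEOUT)
 *                               -> keep the request alive; the data phase
 *                                  either completes or times out
 *   any other error             -> finish the mrq immediately
 *   RESPONSE with no error bits -> sdhci_finish_command()
 */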
#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		struct mmc_command *data_cmd = host->data_cmd;

		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->data_cmd = NULL;
				data_cmd->error = -ETIMEDOUT;
				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				host->data_cmd = NULL;
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->cmd == data_cmd)
					return;

				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
		}

		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;

			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
				" next 0x%08x\n",
				mmc_hostname(host->mmc), dmastart,
				host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
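			/*
			 * Worked example (illustrative numbers only): with the
			 * default 512 KiB boundary, a transfer that started at
			 * dmastart 0x12340000 and has moved 0x2200 bytes gives
			 * dmanow = (0x12342200 & ~0x7ffff) + 0x80000
			 *        = 0x12300000 + 0x80000 = 0x12380000,
			 * i.e. the DMA is restarted at the next 512 KiB
			 * boundary and bytes_xfered becomes 0x40000.
			 */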
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd == host->data_cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
			mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.mx esdhc.  INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted.  We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system.  And the REMOVE gets the
			 * same situation.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_RETUNE)
			mmc_retune_needed(host->mmc);

		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
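/*
 * Illustrative note on the card-detect handling in sdhci_irq() (a reading of
 * the code above, not additional behaviour): when the slot reports a card
 * present the handler re-arms only SDHCI_INT_CARD_REMOVE, and when empty only
 * SDHCI_INT_CARD_INSERT, so a status bit that immediately re-asserts cannot
 * generate an interrupt storm; the threaded handler then reports the change
 * via mmc_detect_change().
 */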
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
		| SDHCI_WAKE_ON_INT;
	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
		      SDHCI_INT_CARD_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
	}
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
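/*
 * Illustrative note (not part of the specification text above): with no
 * quirks set this arms SDHCI_WAKE_ON_INSERT, SDHCI_WAKE_ON_REMOVE and
 * SDHCI_WAKE_ON_INT in the Wakeup Control register, and the matching
 * SDHCI_INT_CARD_INSERT / SDHCI_INT_CARD_REMOVE / SDHCI_INT_CARD_INT bits in
 * the Interrupt Status Enable register.  With
 * SDHCI_QUIRK_BROKEN_CARD_DETECTION only the SDIO card interrupt remains as a
 * wakeup source.
 */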
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
		| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);
	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);
	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
	mmc->ops->set_ios(mmc, &mmc->ios);

	if ((host_flags & SDHCI_PV_ENABLED) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
	    mmc->ops->hs400_enhanced_strobe)
		mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */
/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}
void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	host->caps = caps ? *caps : sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version < SDHCI_SPEC_300)
		return;

	host->caps1 = caps1 ? *caps1 : sdhci_readl(host, SDHCI_CAPABILITIES_1);
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so that
	 * the host can take the appropriate action if regulators are not
	 * available.
	 */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		return ret;

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
		(host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}
	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
	 * that during the first call to ->enable_dma().  Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}
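	/*
	 * Worked example of the sizing above, assuming the usual sdhci.h
	 * values (SDHCI_MAX_SEGS = 128, SDHCI_ADMA2_64_DESC_SZ = 12,
	 * SDHCI_ADMA2_32_DESC_SZ = 8, SDHCI_ADMA2_ALIGN = 4); treat the
	 * numbers as illustrative only: the 64-bit descriptor table is
	 * (128 * 2 + 1) * 12 = 3084 bytes, the 32-bit one (128 * 2 + 1) * 8 =
	 * 2056 bytes, and the alignment (bounce) buffer is 128 * 4 = 512
	 * bytes, all carved out of a single coherent allocation.
	 */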
	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;
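	/*
	 * Illustrative numbers only: with a 200 MHz base clock and no clock
	 * multiplier, f_min = 200000000 / SDHCI_MAX_DIV_SPEC_300 (2046)
	 * ~= 97.7 kHz.  With a Clock Multiplier field of 4 (clk_mul = 5) the
	 * driver advertises f_min = (200 MHz * 5) / 1024 ~= 977 kHz and
	 * allows f_max up to 1 GHz in programmable clock mode.
	 */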
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}
		}

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}
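	/*
	 * Illustrative numbers only: if the timeout clock ends up at 48 MHz,
	 * host->timeout_clk is 48000 (kHz) and, with no
	 * ->get_max_timeout_count() hook, the maximum counter of 1 << 27
	 * cycles gives mmc->max_busy_timeout = 134217728 / 48000 ~= 2796 ms.
	 */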
	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;
	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}
	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;
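	/*
	 * Illustrative example of the 2^(n-1) rule above: a Re-tuning Timer
	 * Count field of 4 yields host->tuning_count = 1 << 3 = 8, i.e. a
	 * re-tune roughly every 8 seconds when the timer is in use.
	 */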
	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
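	/*
	 * Illustrative numbers only: the current fields are in units of
	 * SDHCI_MAX_CURRENT_MULTIPLIER (4 mA), so a 3.3V field of 50 reports
	 * 50 * 4 = 200 mA.  Going the other way, a vmmc regulator limited to
	 * 200000 uA gives curr = 200000 / 1000 / 4 = 50, clamped to
	 * SDHCI_MAX_CURRENT_LIMIT (255) before being packed into
	 * max_current_caps above.
	 */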
	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any support voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;
	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}
	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
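	/*
	 * Illustrative decode of the capability field above: values 0..2 map
	 * to 512 << n, so 0 -> 512, 1 -> 1024 and 2 -> 2048 bytes; 3 is
	 * reserved, which is why anything >= 3 falls back to 512 bytes.
	 */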
	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);
int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
	setup_timer(&host->data_timer, sdhci_timeout_data_timer,
		    (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);
int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	return __sdhci_add_host(host);
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
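/*
 * Typical usage from a platform or PCI glue driver (an illustrative sketch
 * only; my_sdhci_ops and struct my_sdhci_priv are hypothetical names):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_sdhci_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "my-sdhci";
 *	host->ioaddr = base;
 *	host->irq = irq;
 *	host->ops = &my_sdhci_ops;
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 *
 * Glue drivers that must adjust capabilities after they are read can call
 * sdhci_setup_host() and __sdhci_add_host() separately instead.
 */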
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
				mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");