/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"
#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);
/*****************************************************************************\
 *                                                                           *
 *                            Low level functions                            *
 *                                                                           *
\*****************************************************************************/
static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by Vendor's host controller
 * driver to enable v4 mode if supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;

		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}
static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif
/*****************************************************************************\
 *                                                                           *
 *                                Core functions                             *
 *                                                                           *
\*****************************************************************************/
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer, length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
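
/*
 * Illustrative note (not part of the original driver source): for a 32-bit
 * ADMA2 transfer descriptor, sdhci_adma_write_desc() above packs the
 * attribute word, length and address. A 512-byte TRAN+VALID entry at DMA
 * address 0x12345678 would come out as (little-endian):
 *
 *	cmd     = 0x0021	(ADMA2_TRAN_VALID: act = tran, valid = 1)
 *	len     = 0x0200	(512 bytes)
 *	addr_lo = 0x12345678
 *
 * With SDHCI_USE_64_BIT_DMA set, the upper 32 address bits additionally go
 * into addr_hi, and *desc advances by host->desc_sz either way.
 */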
static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * in the code.
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
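
/*
 * Worked example (illustrative, not from the original source): with
 * SDHCI_ADMA2_ALIGN == 4 and SDHCI_ADMA2_MASK == 3, a scatterlist entry
 * mapped at DMA address 0x1001 yields
 *
 *	offset = (4 - (0x1001 & 3)) & 3 = 3
 *
 * so the first 3 bytes are described from the align (bounce) buffer and the
 * rest from 0x1004, restoring the 32-bit alignment the controller requires.
 */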
static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}
static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}
static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode) {
		sdhci_writel(host, addr, SDHCI_ADMA_ADDRESS);
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			sdhci_writel(host, (u64)addr >> 32, SDHCI_ADMA_ADDRESS_HI);
	} else {
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
	}
}
static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}
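
/*
 * Worked example (illustrative, not from the original source): for a data
 * command with timeout_ns = 100000000 (100 ms), timeout_clks = 1000 and
 * host->clock = 50 MHz:
 *
 *	target_timeout = DIV_ROUND_UP(100000000, 1000) = 100000 us
 *	val            = 1000000 * 1000 / 50000000     = 20 us (no remainder,
 *	                                                 so no extra round-up)
 *
 * giving a total target_timeout of 100020 us.
 */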
static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}
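
/*
 * Worked example (illustrative, not from the original source): reading 8
 * blocks of 512 bytes on a 4-bit bus at 50 MHz, with the 100020 us target
 * from the example above:
 *
 *	transfer_time = 512 * 10^9 * (8 / 4) / 50000000 * 2 = 40960 ns
 *	data_timeout  = 8 * 100020000 ns + 40960 ns + MMC_CMD_TRANSFER_TIME
 */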
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}
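
/*
 * Worked example (illustrative, not from the original source): with
 * host->timeout_clk = 1000 (kHz), the minimum timeout is
 *
 *	current_timeout = (1 << 13) * 1000 / 1000 = 8192 us
 *
 * and a 100000 us target needs four doublings (8192 -> 131072), so the
 * count written to TIMEOUT_CONTROL is 4, i.e. a 2^(13+4)-clock timeout.
 */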
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		bool too_big = false;
		u8 count;

		count = sdhci_calc_timeout(host, cmd, &too_big);

		if (too_big &&
		    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
			sdhci_calc_sw_timeout(host, cmd);
			sdhci_set_data_timeout_irq(host, false);
		} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
			sdhci_set_data_timeout_irq(host, true);
		}

		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	host->data_timeout = 0;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	sdhci_config_dma(host);

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);

	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}
static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'.
	 */
	if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}
static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}
static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {
		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
		       mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}
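
/*
 * Illustrative example (not from the original source): because the 136-bit
 * response registers omit the CRC byte, each word above is shifted up by 8
 * and takes the top byte of the next word, i.e.
 *
 *	resp[0] = (resp[0] << 8) | (resp[1] >> 24);
 *
 * which re-aligns R2 responses (CID/CSD) to the layout the mmc core expects.
 */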
static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {
		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
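
/*
 * Worked example (illustrative, not from the original source): a v3.00 host
 * with max_clk = 200 MHz and no clk_mul, asked for 50 MHz, scans even
 * divisors and stops at div = 4 (200 MHz / 4 = 50 MHz <= 50 MHz). Then
 *
 *	real_div = 4, so actual_clock = 50 MHz
 *	div >>= 1 -> 2, the value encoded into SDHCI_CLOCK_CONTROL
 *
 * via the SDHCI_DIV_MASK / SDHCI_DIV_HI_MASK fields above (the register's
 * divisor N gives SDCLK = base clock / 2N).
 */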
void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and set turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);
/*****************************************************************************\
 *                                                                           *
 *                               MMC callbacks                               *
 *                                                                           *
\*****************************************************************************/
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_request);
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		    ios->timing == MMC_TIMING_MMC_HS ||
		    ios->timing == MMC_TIMING_MMC_HS400 ||
		    ios->timing == MMC_TIMING_MMC_HS200 ||
		    ios->timing == MMC_TIMING_MMC_DDR52 ||
		    ios->timing == MMC_TIMING_UHS_SDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR104 ||
		    ios->timing == MMC_TIMING_UHS_DDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
		    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
		     (ios->timing == MMC_TIMING_UHS_SDR25) ||
		     (ios->timing == MMC_TIMING_UHS_SDR50) ||
		     (ios->timing == MMC_TIMING_UHS_SDR104) ||
		     (ios->timing == MMC_TIMING_UHS_DDR50) ||
		     (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}
}
void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
				      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}
*mmc
, struct mmc_ios
*ios
)
2235 struct sdhci_host
*host
= mmc_priv(mmc
);
2236 unsigned long flags
;
2238 spin_lock_irqsave(&host
->lock
, flags
);
2239 host
->flags
|= SDHCI_HS400_TUNING
;
2240 spin_unlock_irqrestore(&host
->lock
, flags
);
void sdhci_start_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_start_tuning);
void sdhci_end_tuning(struct sdhci_host *host)
{
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_end_tuning);
void sdhci_reset_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
	sdhci_reset_tuning(host);

	sdhci_do_reset(host, SDHCI_RESET_CMD);
	sdhci_do_reset(host, SDHCI_RESET_DATA);

	sdhci_end_tuning(host);

	mmc_abort_tuning(host->mmc, opcode);
}
/*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
 * tuning command does not have a data payload (or rather the hardware does it
 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
 * interrupt setup is different to other commands and there is no timeout
 * interrupt so special handling is needed.
 */
void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
{
	struct mmc_host *mmc = host->mmc;
	struct mmc_command cmd = {};
	struct mmc_request mrq = {};
	unsigned long flags;
	u32 b = host->sdma_boundary;

	spin_lock_irqsave(&host->lock, flags);

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.mrq = &mrq;

	mrq.cmd = &cmd;
	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * block to the Host Controller. So we set the block size
	 * to 64 here. In response to CMD21, the card sends 128 bytes
	 * of tuning block for MMC_BUS_WIDTH_8; for MMC_BUS_WIDTH_4
	 * the block size is 64 bytes.
	 */
	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
	else
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);

	/*
	 * The tuning block is sent by the card to the host controller.
	 * So we set the TRNS_READ bit in the Transfer Mode register.
	 * This also takes care of setting DMA Enable and Multi Block
	 * Select in the same register to 0.
	 */
	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	sdhci_send_command(host, &cmd);

	host->cmd = NULL;

	sdhci_del_timer(host, &mrq);

	host->tuning_done = 0;

	spin_unlock_irqrestore(&host->lock, flags);

	/* Wait for Buffer Read Ready interrupt */
	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
			   msecs_to_jiffies(50));
}
EXPORT_SYMBOL_GPL(sdhci_send_tuning);
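/*
 * The handshake here is with sdhci_data_irq(): when the Buffer Read Ready
 * interrupt for the tuning command arrives, it sets host->tuning_done and
 * wakes host->buf_ready_int. The 50ms timeout covers controllers that never
 * raise the interrupt; __sdhci_execute_tuning() then aborts the tuning.
 */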
static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int i;

	/*
	 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times.
	 */
	for (i = 0; i < MAX_TUNING_LOOP; i++) {
		u16 ctrl;

		sdhci_send_tuning(host, opcode);

		if (!host->tuning_done) {
			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
				mmc_hostname(host->mmc));
			sdhci_abort_tuning(host, opcode);
			return -ETIMEDOUT;
		}

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
			if (ctrl & SDHCI_CTRL_TUNED_CLK)
				return 0; /* Success! */
			break;
		}

		/* Spec does not require a delay between tuning cycles */
		if (host->tuning_delay > 0)
			mdelay(host->tuning_delay);
	}

	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
		mmc_hostname(host->mmc));
	sdhci_reset_tuning(host);
	return -EAGAIN;
}
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err = 0;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out;
	}

	if (host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(host, opcode);
		goto out;
	}

	host->mmc->retune_period = tuning_count;

	if (host->tuning_delay < 0)
		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;

	sdhci_start_tuning(host);

	host->tuning_err = __sdhci_execute_tuning(host, opcode);

	sdhci_end_tuning(host);
out:
	host->flags &= ~SDHCI_HS400_TUNING;

	return err;
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
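/*
 * Note: retune_period is in seconds; host->tuning_count is decoded from the
 * capabilities as 2^(n - 1) in sdhci_setup_host(), so e.g. a raw field value
 * of 4 requests re-tuning every 8 seconds in Tuning Mode 1.
 */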
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
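/*
 * With Preset Value Enable set, the controller derives the SDCLK divider and
 * driver strength from the per-timing preset value registers instead of the
 * values programmed by the driver (see the SD Host Controller spec v3.00).
 */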
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			   int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));

	data->host_cookie = COOKIE_UNMAPPED;
}
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	/*
	 * No pre-mapping in the pre hook if we're using the bounce buffer,
	 * for that we would need two bounce buffers since one buffer is
	 * in flight when this is getting called.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
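/*
 * host_cookie life cycle: COOKIE_UNMAPPED -> COOKIE_PRE_MAPPED here (async
 * pre-mapping) or COOKIE_MAPPED at issue time in sdhci_prepare_data();
 * sdhci_post_req() and sdhci_request_done() unmap the buffers and return
 * the cookie to COOKIE_UNMAPPED.
 */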
static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}
static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {
		host->data_cmd->error = err;
		sdhci_finish_mrq(host, host->data_cmd->mrq);
	}

	if (host->cmd) {
		host->cmd->error = err;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}
}
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		sdhci_error_out_mrqs(host, -ENOMEDIUM);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/
static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq)
			break;
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			if (host->bounce_buffer) {
				/*
				 * On reads, copy the bounced data into the
				 * sglist
				 */
				if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
					unsigned int length = data->bytes_xfered;

					if (length > host->bounce_buffer_size) {
						pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
						       mmc_hostname(host->mmc),
						       host->bounce_buffer_size,
						       data->bytes_xfered);
						/* Cap it down and continue */
						length = host->bounce_buffer_size;
					}
					dma_sync_single_for_cpu(
						host->mmc->parent,
						host->bounce_addr,
						host->bounce_buffer_size,
						DMA_FROM_DEVICE);
					sg_copy_from_buffer(data->sg,
						data->sg_len,
						host->bounce_buffer,
						length);
				} else {
					/* No copying, just switch ownership */
					dma_sync_single_for_cpu(
						host->mmc->parent,
						host->bounce_addr,
						host->bounce_buffer_size,
						mmc_get_dma_dir(data));
				}
			} else {
				/* Unmap the raw data */
				dma_unmap_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len,
					     mmc_get_dma_dir(data));
			}
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	host->mrqs_done[i] = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);

	return false;
}
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	while (!sdhci_request_done(host))
		;
}
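/*
 * sdhci_request_done() returns true when mrqs_done[] is empty, or when it
 * must defer because a needed reset is still blocked by an active command,
 * so this loop drains every completed request in one tasklet run.
 */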
static void sdhci_timeout_timer(struct timer_list *t)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = from_timer(host, t, timer);

	spin_lock_irqsave(&host->lock, flags);

	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_timeout_data_timer(struct timer_list *t)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = from_timer(host, t, data_timer);

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
{
	/* Handle auto-CMD12 error */
	if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
		struct mmc_request *mrq = host->data_cmd->mrq;
		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
		int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
				   SDHCI_INT_DATA_TIMEOUT :
				   SDHCI_INT_DATA_CRC;

		/* Treat auto-CMD12 error the same as data error */
		if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
			*intmask_p |= data_err_bit;
			return;
		}
	}

	if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/* Treat data command CRC error the same as data CRC error */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			*intmask_p |= SDHCI_INT_DATA_CRC;
			return;
		}

		sdhci_finish_mrq(host, host->cmd->mrq);
		return;
	}

	/* Handle auto-CMD23 error */
	if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
		struct mmc_request *mrq = host->cmd->mrq;
		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
		int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
			  -ETIMEDOUT :
			  -EILSEQ;

		if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mrq->sbc->error = err;
			sdhci_finish_mrq(host, mrq);
			return;
		}
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
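/*
 * Auto-CMD error routing, as implemented above: an Auto CMD12 error is folded
 * back into *intmask_p as a data error so that the normal data-error path
 * cleans up, whereas an Auto CMD23 error is reported via mrq->sbc->error.
 */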
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		struct mmc_command *data_cmd = host->data_cmd;

		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->data_cmd = NULL;
				data_cmd->error = -ETIMEDOUT;
				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				host->data_cmd = NULL;
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->cmd == data_cmd)
					return;

				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
		}

		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			dma_addr_t dmastart, dmanow;

			dmastart = sdhci_sdma_address(host);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
			    &dmastart, host->data->bytes_xfered, &dmanow);
			sdhci_set_sdma_addr(host, dmanow);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd == host->data_cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
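/*
 * Worked example for the DMA-boundary restart above: with the default 512KiB
 * boundary, a transfer halted at e.g. 0x10077FF8 resumes at 0x10080000, the
 * next 512KiB-aligned address, because dmanow is rounded up to the boundary.
 */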
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		DBG("IRQ status 0x%08x\n", intmask);

		if (host->ops->irq) {
			intmask = host->ops->irq(host, intmask);
			if (!intmask)
				goto cont;
		}

		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is a observation on i.mx esdhc. INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system. And the REMOVE gets the
			 * same situation.
			 *
			 * More testing are needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_RETUNE)
			mmc_retune_needed(host->mmc);

		if ((intmask & SDHCI_INT_CARD_INT) &&
		    (host->ier & SDHCI_INT_CARD_INT)) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}
cont:
		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}
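/*
 * IRQ handling is split: the hard handler above acks the controller and
 * latches events in host->thread_isr, while this threaded handler does the
 * sleeping work (card-detect notification and SDIO interrupt dispatch).
 */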
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
{
	return mmc_card_is_removable(host->mmc) &&
	       !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	       !mmc_can_gpio_cd(host->mmc);
}
/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
		  SDHCI_WAKE_ON_INT;
	u32 irq_val = 0;
	u8 wake_val = 0;
	u8 val;

	if (sdhci_cd_irq_can_wakeup(host)) {
		wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
		irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
	}

	if (mmc_card_wake_sdio_irq(host->mmc)) {
		wake_val |= SDHCI_WAKE_ON_INT;
		irq_val |= SDHCI_INT_CARD_INT;
	}

	if (!wake_val)
		return false;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	val |= wake_val;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);

	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);

	host->irq_wake_enabled = !enable_irq_wake(host->irq);

	return host->irq_wake_enabled;
}
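/*
 * Returning false (no usable wakeup source, or enable_irq_wake() failed)
 * makes sdhci_suspend_host() fall back to masking all interrupts and
 * freeing the IRQ for the duration of system suspend.
 */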
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);

	disable_irq_wake(host->irq);

	host->irq_wake_enabled = false;
}
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc)) ||
	    !sdhci_enable_irq_wakeups(host)) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
	}

	if (host->irq_wake_enabled) {
		sdhci_disable_irq_wakeups(host);
	} else {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	}

	sdhci_enable_card_detection(host);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
	    mmc->ios.power_mode != MMC_POWER_OFF) {
		/* Force clock and power re-program */
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
		mmc->ops->set_ios(mmc, &mmc->ios);

		if ((host_flags & SDHCI_PV_ENABLED) &&
		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
			spin_lock_irqsave(&host->lock, flags);
			sdhci_enable_preset_value(host, true);
			spin_unlock_irqrestore(&host->lock, flags);
		}

		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
		    mmc->ops->hs400_enhanced_strobe)
			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */
/*****************************************************************************\
 *                                                                           *
 * Command Queue Engine (CQE) helpers                                        *
 *                                                                           *
\*****************************************************************************/
void sdhci_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		ctrl |= SDHCI_CTRL_ADMA64;
	else
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
		     SDHCI_BLOCK_SIZE);

	/* Set maximum timeout */
	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);

	host->ier = host->cqe_ier;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	host->cqe_on = true;

	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (recovery) {
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
		   int *data_error)
{
	u32 mask;

	if (!host->cqe_on)
		return false;

	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
		*cmd_error = -EILSEQ;
	else if (intmask & SDHCI_INT_TIMEOUT)
		*cmd_error = -ETIMEDOUT;
	else
		*cmd_error = 0;

	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
		*data_error = -EILSEQ;
	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
		*data_error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_ADMA_ERROR)
		*data_error = -EIO;
	else
		*data_error = 0;

	/* Clear selected interrupts. */
	mask = intmask & host->cqe_ier;
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

	if (intmask & SDHCI_INT_BUS_POWER)
		pr_err("%s: Card is consuming too much power!\n",
		       mmc_hostname(host->mmc));

	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
	if (intmask) {
		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);
	}

	return true;
}
EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
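/*
 * A true return means the interrupt belonged to the CQE; the caller
 * (typically a glue driver wrapping cqhci) is then expected to forward
 * cmd_error and data_error to the CQE layer to finish handling.
 */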
/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/
struct sdhci_host *sdhci_alloc_host(struct device *dev,
				    size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	host->cqe_ier     = SDHCI_CQE_INT_MASK;
	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;

	host->tuning_delay = -1;

	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;

	/*
	 * The DMA table descriptor count is calculated as the maximum
	 * number of segments times 2, to allow for an alignment
	 * descriptor for each segment, plus 1 for a nop end descriptor.
	 */
	host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
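/*
 * Typical use by a platform/PCI glue driver (sketch only, error handling
 * omitted):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(*priv));
 *	host->ioaddr = ...;	// map controller registers
 *	host->irq = ...;	// controller interrupt
 *	ret = sdhci_add_host(host);	// or sdhci_setup_host() +
 *					// __sdhci_add_host()
 *
 * Teardown mirrors this with sdhci_remove_host() and sdhci_free_host().
 */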
static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}
void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps-mask", &dt_caps_mask);
	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps", &dt_caps);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned int max_blocks;
	unsigned int bounce_size;
	int ret;

	/*
	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
	 * has diminishing returns, this is probably because SD/MMC
	 * cards are usually optimized to handle this size of requests.
	 */
	bounce_size = SZ_64K;
	/*
	 * Adjust downwards to maximum request size if this is less
	 * than our segment size, else hammer down the maximum
	 * request size to the maximum buffer size.
	 */
	if (mmc->max_req_size < bounce_size)
		bounce_size = mmc->max_req_size;
	max_blocks = bounce_size / 512;

	/*
	 * When we just support one segment, we can get significant
	 * speedups by the help of a bounce buffer to group scattered
	 * reads/writes together.
	 */
	host->bounce_buffer = devm_kmalloc(mmc->parent,
					   bounce_size,
					   GFP_KERNEL);
	if (!host->bounce_buffer) {
		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
		       mmc_hostname(mmc), bounce_size);
		/*
		 * Exiting with zero here makes sure we proceed with
		 * mmc->max_segs == 1.
		 */
		return;
	}

	host->bounce_addr = dma_map_single(mmc->parent,
					   host->bounce_buffer,
					   bounce_size,
					   DMA_BIDIRECTIONAL);
	ret = dma_mapping_error(mmc->parent, host->bounce_addr);
	if (ret)
		/* Again fall back to max_segs == 1 */
		return;
	host->bounce_buffer_size = bounce_size;

	/* Lie about this since we're bouncing */
	mmc->max_segs = max_blocks;
	mmc->max_seg_size = bounce_size;
	mmc->max_req_size = bounce_size;

	pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
		mmc_hostname(mmc), max_blocks, bounce_size);
}
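/*
 * Example of the sizing above: with the full 64KiB bounce buffer,
 * max_blocks = 65536 / 512 = 128, so up to 128 scattered 512-byte segments
 * are advertised and coalesced into the single contiguous DMA region.
 */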
static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
{
	/*
	 * According to SD Host Controller spec v4.10, bit[27] added from
	 * version 4.10 in Capabilities Register is used as 64-bit System
	 * Address support for V4 mode.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
		return host->caps & SDHCI_CAN_64BIT_V4;

	return host->caps & SDHCI_CAN_64BIT;
}
int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so that
	 * the host can take the appropriate action if regulators are not
	 * available.
	 */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;

	DBG("Version:   0x%08x | Present:  0x%08x\n",
	    sdhci_readw(host, SDHCI_HOST_VERSION),
	    sdhci_readl(host, SDHCI_PRESENT_STATE));
	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
	    sdhci_readl(host, SDHCI_CAPABILITIES),
	    sdhci_readl(host, SDHCI_CAPABILITIES_1));

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_420) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
		(host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA. A driver has the opportunity to change
	 * that during the first call to ->enable_dma(). Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (sdhci_can_64bit_dma(host))
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA if v4 mode not set */
	if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = host->adma_table_cnt *
					      SDHCI_ADMA2_64_DESC_SZ(host);
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
		} else {
			host->adma_table_sz = host->adma_table_cnt *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		/*
		 * Use zalloc to zero the reserved high 32-bits of 128-bit
		 * descriptors so that they never need to be written.
		 */
		buf = dma_alloc_coherent(mmc_dev(mmc),
					 host->align_buffer_sz + host->adma_table_sz,
					 &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
	    !host->ops->get_max_timeout_count)
		mmc->max_busy_timeout = 0;

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/*
	 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
	 * For v4 mode, SDMA may use Auto-CMD23 as well.
	 */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);

		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);

		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
						    3600000))
			host->flags &= ~SDHCI_SIGNALING_330;

		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
		 * but if the board is modeled such that the IO lines are not
		 * connected to 1.8v then HS200/HS400 cannot be supported.
		 * Disable HS200/HS400 if the board does not have 1.8v connected
		 * to the IO lines. (Applicable for other modes in 1.8v)
		 */
		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any support voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		if (swiotlb_max_segment()) {
			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
						IO_TLB_SEGSIZE;
			mmc->max_req_size = min(mmc->max_req_size,
						max_req_size);
		}
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);
void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);
int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
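/*
 * sdhci_add_host() is the simple combined path; drivers that need to adjust
 * mmc caps between capability parsing and registration call
 * sdhci_setup_host() and __sdhci_add_host() separately instead.
 */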
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");