/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better Power management
 *   Handle MMC errors better
 *   double buffer support
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}
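
/*
 * Divider selection (summary of the loop below): starting from f_min and
 * the largest divider value, keep doubling the clock while the next
 * doubling still fits under the requested rate, then OR in 0x100 so the
 * clock is gated on (the same bit that tmio_mmc_clk_start()/_stop()
 * toggle). Bit 22 of the computed value is handed to the optional platform
 * set_clk_div() hook; its exact meaning is platform-glue specific.
 */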
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
			new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq)
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}
/* Runs from the host->done work item; takes host->lock itself */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	mmc_request_done(host->mmc, mrq);
}
static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 *  should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set. In these cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
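
	/*
	 * The four reads below fill resp[3]..resp[0] from ascending register
	 * addresses; for 136-bit (R2) responses the byte shuffle that follows
	 * left-aligns the words so that resp[0] ends up holding the most
	 * significant bits, which is the layout the MMC core expects.
	 */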
	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags &  MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
				     int *ireg, int *status)
{
	*status = sd_ctrl_read32(host, CTL_STATUS);
	*ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(*status);
	pr_debug_status(*ireg);
}
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}
irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_card_detect_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				  int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			TMIO_STAT_CMDRESPEND |
			TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		return true;
	}

	return false;
}
irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_sdcard_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return IRQ_HANDLED;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, status & ~TMIO_SDIO_MASK_ALL);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdio_irq);
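
/*
 * tmio_mmc_irq() below is the combined handler for hardware that routes
 * card-detect, SD/MMC and SDIO events through a single interrupt line; the
 * separately exported handlers above are meant for platforms that wire
 * these sources to individual IRQs.
 */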
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	pr_debug("MMC IRQ begin\n");

	tmio_mmc_card_irq_status(host, &ireg, &status);
	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	tmio_mmc_sdio_irq(irq, devid);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
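
/*
 * Request flow: tmio_mmc_request() programs the data registers via
 * tmio_mmc_start_data(), fires the command via tmio_mmc_start_command()
 * and arms delayed_reset_work as a 2 second watchdog. Completion happens
 * from the interrupt handlers, which queue host->done to run
 * tmio_mmc_finish_request().
 */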
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(&host->pdev->dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(&host->pdev->dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * pdata->power == false only if COLD_CD is available, otherwise only
	 * in short time intervals during probing or resuming
	 */
	if (ios->power_mode == MMC_POWER_ON && ios->clock) {
		if (!pdata->power) {
			pm_runtime_get_sync(&host->pdev->dev);
			pdata->power = true;
		}
		tmio_mmc_set_clock(host, ios->clock);
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
		/* start bus clock */
		tmio_mmc_clk_start(host);
	} else if (ios->power_mode != MMC_POWER_UP) {
		if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
			host->set_pwr(host->pdev, 0);
		if (pdata->power) {
			pdata->power = false;
			pm_runtime_put(&host->pdev->dev);
		}
		tmio_mmc_clk_stop(host);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	mutex_unlock(&host->ios_lock);
}
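
/*
 * Write protect is reported active-low by the controller: get_ro() below
 * returns read-only only when TMIO_STAT_WRPROTECT reads 0 and the platform
 * has not set TMIO_MMC_WRPROTECT_DISABLE (typically used for slots without
 * a write-protect signal).
 */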
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
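
/*
 * Typical usage (from the platform glue, e.g. tmio_mmc.c or
 * sh_mobile_sdhi.c): fill a struct tmio_mmc_data, call
 * tmio_mmc_host_probe() from the platform driver's probe routine, then
 * request the IRQ(s) with tmio_mmc_irq() or the split card-detect /
 * sdcard / sdio handlers exported above.
 */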
int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	pdata->dev = &pdev->dev;
	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	pdata->power = false;
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto pm_disable;

	/*
	 * There are 4 different scenarios for the card detection:
	 *  1) an external gpio irq handles the cd (best for power savings)
	 *  2) internal sdhi irq handles the cd
	 *  3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL
	 *  4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE
	 *
	 * While we increment the rtpm counter for all scenarios when the mmc
	 * core activates us by calling an appropriate set_ios(), we must
	 * additionally ensure that in case 2) the tmio mmc hardware stays
	 * powered on during runtime for the card detection to work.
	 */
	if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD
		|| mmc->caps & MMC_CAP_NEEDS_POLL
		|| mmc->caps & MMC_CAP_NONREMOVABLE))
		pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	mmc_add_host(mmc);

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;

	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	*host = _host;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;

	/*
	 * We don't have to manipulate pdata->power here: if there is a card in
	 * the slot, the runtime PM is active and our .runtime_resume() will not
	 * be run. If there is no card in the slot and the platform can suspend
	 * the controller, the runtime PM is suspended and pdata->power == false,
	 * so, our .runtime_resume() will not try to detect a card in the slot.
	 */
	if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD
		|| host->mmc->caps & MMC_CAP_NEEDS_POLL
		|| host->mmc->caps & MMC_CAP_NONREMOVABLE)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	iounmap(host->ctl);
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
int tmio_mmc_host_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret = mmc_suspend_host(mmc);

	if (!ret)
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	host->pm_error = pm_runtime_put_sync(dev);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);

int tmio_mmc_host_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	/* The MMC core will perform the complete set up */
	host->pdata->power = false;

	host->pm_global = true;
	if (!host->pm_error)
		pm_runtime_get_sync(dev);

	if (host->pm_global) {
		/* Runtime PM resume callback didn't run */
		tmio_mmc_reset(host);
		tmio_mmc_enable_dma(host, true);
		host->pm_global = false;
	}

	return mmc_resume_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_resume);

#endif	/* CONFIG_PM */
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	tmio_mmc_reset(host);
	tmio_mmc_enable_dma(host, true);

	if (pdata->power) {
		/* Only entered after a card-insert interrupt */
		if (!mmc->card)
			tmio_mmc_set_ios(mmc, &mmc->ios);
		mmc_detect_change(mmc, msecs_to_jiffies(100));
	}

	host->pm_global = false;

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);

MODULE_LICENSE("GPL v2");