/*
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * TODO
 *  3. Handle MMC errors better
 */
/*
 * The MMCIF driver is now processing MMC requests asynchronously, according
 * to the Linux MMC API requirement.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. In case of the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback, if DMA is used, a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or a successful
 * processing completion, the MMC core is informed and the request processing is
 * finished. In case processing has to continue, i.e., if data has to be read
 * from or written to the card, or if a stop command has to be sent, the next
 * top half is called, which performs the necessary hardware handling and
 * reschedules the timeout work. This returns the driver state machine into the
 * bottom half waiting state.
 */
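
/*
 * Illustrative sketch (not part of the driver): the path one successful
 * request takes through the halves described above. The function names
 * refer to handlers defined later in this file; the exact sequence of
 * bottom half runs depends on the request.
 *
 *   sh_mmcif_request()          top half: validate, program the command,
 *                               arm the timeout work
 *     -> sh_mmcif_intr()        hard IRQ: acknowledge and mask the source
 *       -> sh_mmcif_irqt()      IRQ thread: run the stage-specific handler
 *         -> next top half      e.g. start the data stage or a stop command
 *         ...
 *       -> mmc_request_done()   completion (or error) reported to the core
 */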
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: do not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1-bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4-bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8-bit */
#define CMD_CTRL_BREAK		(1 << 0)

#define BLOCK_SIZE_MASK		0x0000ffff
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
#define STS1_CMDSEQ		(1 << 31)

#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */
enum mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
};

enum mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};

struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct sh_dmae_slave dma_slave_tx;
	struct sh_dmae_slave dma_slave_rx;
	struct clk *hclk;
	unsigned int clk;
	int bus_width;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum mmcif_state state;
	enum mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool card_present;

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	struct completion dma_complete;
	bool dma_active;
};
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}
static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}
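
/*
 * Example (illustrative only): the two read-modify-write helpers above are
 * how individual bits are flipped in the 32-bit MMCIF registers, e.g.
 * unmasking the buffer-read-enable interrupt before a PIO read:
 *
 *	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
 */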
static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_data *data = host->mrq->data;

	dev_dbg(&host->pd->dev, "Command completed\n");

	if (WARN(!data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	complete(&host->dma_complete);
}
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}
static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);

	chan->private = arg;
	return true;
}
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	struct sh_dmae_slave *tx, *rx;
	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dev_warn(&host->pd->dev,
			 "Update your platform to use embedded DMA slave IDs\n");
		tx = &pdata->dma->chan_priv_tx;
		rx = &pdata->dma->chan_priv_rx;
	} else {
		tx = &host->dma_slave_tx;
		tx->slave_id = pdata->slave_id_tx;
		rx = &host->dma_slave_rx;
		rx->slave_id = pdata->slave_id_rx;
	}
	if (tx->slave_id > 0 && rx->slave_id > 0) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx);
		dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx);
		dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		init_completion(&host->dma_complete);
	}
}
static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
	if (p->sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
				((fls(host->clk / clk) - 1) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
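
/*
 * Worked example for the divider computation above (illustrative; assumes
 * the field masked by CLK_CLEAR holds a power-of-two divider selector):
 * with host->clk = 104 MHz and a requested clk of 400 kHz,
 * host->clk / clk = 260 and fls(260) - 1 = 8, so 8 is programmed into the
 * divider field, i.e. the clock is divided far enough not to exceed the
 * requested rate.
 */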
static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
			SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(&host->pd->dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_dbg(&host->pd->dev, ": CRC error\n");
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_dbg(&host->pd->dev, ": Timeout\n");
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, ": End/Index error\n");
		ret = -EIO;
	}
	return ret;
}
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	if (host->sg_idx == data->sg_len)
		return false;

	return true;
}
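
/*
 * Example of the bookkeeping done by sh_mmcif_next_block() (illustrative):
 * with host->blocksize = 512 and a scatterlist entry of length 4096, eight
 * blocks are transferred out of the same entry (sg_blkidx advancing 512,
 * 1024, ..., 4096) before sg_idx moves on to the next entry and pio_ptr is
 * re-pointed at its virtual address.
 */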
static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;
	schedule_delayed_work(&host->timeout_work, host->timeout);

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}
static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			  BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);
	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}
static void sh_mmcif_single_write(struct sh_mmcif_host *host,
				  struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;
	schedule_delayed_work(&host->timeout_work, host->timeout);

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}
static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			  BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);
	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}
static void sh_mmcif_get_response(struct sh_mmcif_host *host,
				  struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}
static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
				       struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}
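
/*
 * Example of an assembled CMD_SET value (illustrative, derived from the
 * macros above): a single-block read (CMD17, R1 response, 1-bit bus) yields
 * (17 << 24) | CMD_SET_RTYP_6B | CMD_SET_WDAT | CMD_SET_DATW_1, i.e.
 * 0x11000000 | (1 << 22) | (1 << 19) | 0 = 0x11480000.
 */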
static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
		return -EINVAL;
	}
}
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 mask;

	/* response busy check */
	switch (opc) {
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
		mask = MASK_START_CMD | MASK_MRBSYE;
		break;
	default:
		mask = MASK_START_CMD | MASK_MCRSPE;
		break;
	}

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
}
static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(&host->pd->dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
	schedule_delayed_work(&host->timeout_work, host->timeout);
}
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO command */
	case SD_IO_SEND_OP_COND:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if (!mrq->data) {
			/* send_if_cond cmd (not support) */
			host->state = STATE_IDLE;
			mrq->cmd->error = -ETIMEDOUT;
			mmc_request_done(mmc, mrq);
			return;
		}
		break;
	default:
		break;
	}

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (!host->card_present) {
			/* See if we also get DMA */
			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
			host->card_present = true;
		}
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->card_present) {
				sh_mmcif_release_dma(host);
				host->card_present = false;
			}
		}
		if (host->power) {
			pm_runtime_put(&host->pd->dev);
			host->power = false;
			if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
				p->down_pwr(host->pd);
		}
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock) {
		if (!host->power) {
			if (p->set_pwr)
				p->set_pwr(host->pd, ios->power_mode);
			pm_runtime_get_sync(&host->pd->dev);
			host->power = true;
			sh_mmcif_sync_reset(host);
		}
		sh_mmcif_clock_control(host, ios->clock);
	}

	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}
static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	if (!p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}
static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
			cmd->error = -ETIMEDOUT;
			host->sd_error = false;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
				cmd->opcode, cmd->error);
			break;
		}
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);
	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_all(host->chan_rx);
		else
			dmaengine_terminate_all(host->chan_tx);
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error)
		data->bytes_xfered = 0;

	return false;
}
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = mrq->data;

	cancel_delayed_work_sync(&host->timeout_work);

	/*
	 * All handlers return true, if processing continues, and false, if the
	 * request has to be completed - successfully or not
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		if (sh_mmcif_end_cmd(host))
			/* Wait for data */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_MREAD:
		if (sh_mmcif_mread_block(host))
			/* Wait for more data */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_READ:
		if (sh_mmcif_read_block(host))
			/* Wait for data end */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		if (sh_mmcif_mwrite_block(host))
			/* Wait data to write */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_WRITE:
		if (sh_mmcif_write_block(host))
			/* Wait for data end */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error)
			data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error)
				return IRQ_HANDLED;
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	return IRQ_HANDLED;
}
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state;
	int err = 0;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);

	if (state & INT_ERR_STS) {
		/* error interrupts - process first */
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	} else if (state & INT_RBSYE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_RBSYE | INT_CRSPE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
	} else if (state & INT_CRSPE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
	} else if (state & INT_BUFREN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	} else if (state & INT_BUFWEN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
	} else if (state & INT_CMD12DRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12DRE | INT_CMD12RBE |
				  INT_CMD12CRE | INT_BUFRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
	} else if (state & INT_BUFRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	} else if (state & INT_DTRANE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	} else if (state & INT_CMD12RBE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12RBE | INT_CMD12CRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
	} else {
		dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	}
	if (err) {
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			mmcif_dma_complete(host);
	} else {
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}
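
/*
 * Note on the split above (illustrative summary): sh_mmcif_intr() runs in
 * hard IRQ context and only acknowledges and masks the interrupt source;
 * returning IRQ_WAKE_THREAD hands off to sh_mmcif_irqt(), the threaded
 * handler registered alongside it via request_threaded_irq() in
 * sh_mmcif_probe(), which may sleep while finishing the request.
 */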
static void mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = container_of(work, struct delayed_work, work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd;
	struct resource *res;
	void __iomem *reg;
	char clk_name[8];

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0 || irq[1] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}
	pd = pdev->dev.platform_data;
	if (!pd) {
		dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
		ret = -ENXIO;
		goto clean_up;
	}
	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto clean_up;
	}
	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= 1000;

	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
	host->hclk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(host->hclk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		ret = PTR_ERR(host->hclk);
		goto clean_up1;
	}
	clk_enable(host->hclk);
	host->clk = clk_get_rate(host->hclk);
	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	mmc->f_max = host->clk;
	/* close to 400KHz */
	if (mmc->f_max < 51200000)
		mmc->f_min = mmc->f_max / 128;
	else if (mmc->f_max < 102400000)
		mmc->f_min = mmc->f_max / 256;
	else
		mmc->f_min = mmc->f_max / 512;
	if (pd->ocr)
		mmc->ocr_avail = pd->ocr;
	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
	if (pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	sh_mmcif_sync_reset(host);
	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto clean_up2;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
		goto clean_up3;
	}
	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
		goto clean_up4;
	}

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto clean_up5;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

clean_up5:
	free_irq(irq[1], host);
clean_up4:
	free_irq(irq[0], host);
clean_up3:
	pm_runtime_suspend(&pdev->dev);
clean_up2:
	pm_runtime_disable(&pdev->dev);
	clk_disable(host->hclk);
clean_up1:
	mmc_free_host(mmc);
clean_up:
	if (reg)
		iounmap(reg);
	return ret;
}
static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	int irq[2];

	host->dying = true;
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);
	if (host->addr)
		iounmap(host->addr);

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);

	free_irq(irq[0], host);
	free_irq(irq[1], host);

	platform_set_drvdata(pdev, NULL);

	clk_disable(host->hclk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	int ret = mmc_suspend_host(host->mmc);

	if (!ret) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
		clk_disable(host->hclk);
	}

	return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	clk_enable(host->hclk);

	return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend	NULL
#define sh_mmcif_resume		NULL
#endif	/* CONFIG_PM */
static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	.suspend = sh_mmcif_suspend,
	.resume = sh_mmcif_resume,
};
static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
	},
};

module_platform_driver(sh_mmcif_driver);
MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");