/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#include "tmio_mmc.h"

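/*
 * Register offsets within the SD control window. Registers are 16 bits
 * wide unless accessed through the 32-bit helpers below.
 */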
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
#define CTL_STOP_INTERNAL_ACTION 0x08
#define CTL_XFER_BLK_COUNT 0xa
#define CTL_RESPONSE 0x0c
#define CTL_STATUS 0x1c
#define CTL_IRQ_MASK 0x20
#define CTL_SD_CARD_CLK_CTL 0x24
#define CTL_SD_XFER_LEN 0x26
#define CTL_SD_MEM_CARD_OPT 0x28
#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
#define CTL_SD_DATA_PORT 0x30
#define CTL_TRANSACTION_CTL 0x34
#define CTL_SDIO_STATUS 0x36
#define CTL_SDIO_IRQ_MASK 0x38
#define CTL_RESET_SD 0xe0
#define CTL_SDIO_REGS 0x100
#define CTL_CLK_AND_WAIT_CTL 0x138
#define CTL_RESET_SDIO 0x1e0

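/*
 * Raw register accessors. host->bus_shift scales the nominal offsets for
 * controllers wired to a wider bus; 32-bit registers are accessed as two
 * consecutive 16-bit halves.
 */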
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	readsw(host->ctl + (addr << host->bus_shift), buf, count);
}

static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift)) |
	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}

static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	writesw(host->ctl + (addr << host->bus_shift), buf, count);
}

static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}

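/* A set bit in CTL_IRQ_MASK disables the corresponding interrupt. */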
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

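/* Status bits are write-zero-to-clear, hence the inverted argument. */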
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

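/* Advance to the next scatterlist entry; returns the number of entries left. */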
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif

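/*
 * Bit 0 of CTL_TRANSACTION_CTL appears to gate SDIO IRQ generation;
 * the SDIO IRQ mask register is programmed to match.
 */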
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
				(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}

static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

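	/*
	 * Start from f_min (maximum division) and halve the divider while
	 * doubling the rate, to pick the highest rate not above the
	 * requested clock. Bit 8 is presumably the clock-enable bit, as
	 * tmio_mmc_clk_start() sets the same bit.
	 */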
	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;

	/*
	 * Testing on sh-mobile showed that SDIO IRQs are unmasked when
	 * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
	 * device IRQ here and restore the SDIO IRQ mask before
	 * re-enabling the device IRQ.
	 */
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		disable_irq(host->irq);
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
	msleep(10);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
		enable_irq(host->irq);
	}
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
	/* see comment in tmio_mmc_clk_stop above */
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		disable_irq(host->irq);
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
	msleep(10);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
		enable_irq(host->irq);
	}
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Request already finished, or a newer request is in progress and
	 * has pushed the timeout deadline into the future.
	 */
	if (!mrq
	    || time_is_after_jiffies(host->last_req_ts +
		       msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->mrq = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	mmc_request_done(host->mmc, mrq);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD         0x0040
#define RESP_NONE       0x0300
#define RESP_R1         0x0400
#define RESP_R1B        0x0500
#define RESP_R2         0x0600
#define RESP_R3         0x0700
#define DATA_PRESENT    0x0800
#define TRANSFER_READ   0x1000
#define TRANSFER_MULTI  0x2000
#define SECURITY_CMD    0x4000

static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

	/* FIXME - this seems to be ok commented out but the spec suggests this bit
	 * should be set when issuing app commands.
	 *	if (cmd->flags & MMC_FLAG_ACMD)
	 *		c |= APP_CMD;
	 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}

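/*
 * If the DMA read went through the driver's bounce buffer, copy the data
 * back into the caller's original scatterlist entry.
 */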
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with
		 * the BUSY status bit clear, but on some operations, like
		 * mount or at the beginning of a write / sync / umount, there
		 * is one DATAEND interrupt with the BUSY bit set. In these
		 * cases waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
			     unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

out:
	spin_unlock(&host->lock);
}

static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	sdio_ireg = 0;
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
		    sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			   "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
					      TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
		/* if (ireg & TMIO_STAT_ERR_IRQ)
		 *	handled |= tmio_error_irq(host, irq, stat);
		 */

		/* Command completion */
		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
			tmio_mmc_ack_mmc_irqs(host,
					      TMIO_STAT_CMDRESPEND |
					      TMIO_STAT_CMDTIMEOUT);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret;

	if (host->mrq)
		pr_debug("request not null\n");

	host->last_req_ts = jiffies;
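	/* Presumably meant to publish the new timestamp before the new mrq
	 * becomes visible to tmio_mmc_reset_work(), which checks them in
	 * the opposite order. */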
	wmb();
	host->mrq = mrq;

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->mrq = NULL;
	host->force_pio = false;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but
 * as MMC won't run that fast, it has to be clocked at 12MHz, which is the
 * next slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> ON -> UP */
	switch (ios->power_mode) {
	case MMC_POWER_OFF: /* power down SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 0);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_ON: /* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
		break;
	case MMC_POWER_UP: /* start bus clock */
		tmio_mmc_clk_start(host);
		break;
	}

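	/* Bit 15 of CTL_SD_MEM_CARD_OPT appears to select the bus width:
	 * set for 1-bit, clear for 4-bit operation. */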
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. Delay taken from the WinCE driver */
	udelay(140);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

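	/* TMIO_STAT_WRPROTECT reads as 1 while the card is writable, so
	 * report read-only when the bit is clear (or when the platform sets
	 * TMIO_MMC_WRPROTECT_DISABLE). */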
	return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};

int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
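	/* The lowest usable rate corresponds to the maximum divider of 512
	 * programmed by tmio_mmc_set_clock(). */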
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto unmap_ctl;

	_host->irq = ret;

	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED |
			  IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host);
	if (ret)
		goto unmap_ctl;

	spin_lock_init(&_host->lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	mmc_add_host(mmc);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;

	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	*host = _host;

	return 0;

unmap_ctl:
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	mmc_remove_host(host->mmc);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);
	free_irq(host->irq, host);
	iounmap(host->ctl);
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);

MODULE_LICENSE("GPL v2");