/*
 * Imported from git.proxmox.com mirror_ubuntu-artful-kernel.git:
 * drivers/mmc/omap.c (git web-viewer navigation header removed)
 */
1 /*
 * linux/drivers/mmc/omap.c
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com>
6 * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7 * Other hacks (DMA, SD, etc) by David Brownell
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/init.h>
17 #include <linux/ioport.h>
18 #include <linux/platform_device.h>
19 #include <linux/interrupt.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/delay.h>
22 #include <linux/spinlock.h>
23 #include <linux/timer.h>
24 #include <linux/mmc/host.h>
25 #include <linux/mmc/protocol.h>
26 #include <linux/mmc/card.h>
27 #include <linux/clk.h>
28
29 #include <asm/io.h>
30 #include <asm/irq.h>
31 #include <asm/scatterlist.h>
32 #include <asm/mach-types.h>
33
34 #include <asm/arch/board.h>
35 #include <asm/arch/gpio.h>
36 #include <asm/arch/dma.h>
37 #include <asm/arch/mux.h>
38 #include <asm/arch/fpga.h>
39 #include <asm/arch/tps65010.h>
40
41 #include "omap.h"
42
43 #define DRIVER_NAME "mmci-omap"
44 #define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
45
46 /* Specifies how often in millisecs to poll for card status changes
47 * when the cover switch is open */
48 #define OMAP_MMC_SWITCH_POLL_DELAY 500
49
50 static int mmc_omap_enable_poll = 1;
51
/*
 * Per-controller driver state, embedded in the mmc_host private area.
 * 16xx chips have two MMC blocks, so more than one instance may exist.
 */
struct mmc_omap_host {
	int initialized;	/* NOTE(review): never written in this file — confirm use */
	int suspended;		/* nonzero while suspended (see mmc_omap_suspend) */
	struct mmc_request * mrq;	/* request in flight, NULL when idle */
	struct mmc_command * cmd;	/* command phase in flight */
	struct mmc_data * data;		/* data phase in flight */
	struct mmc_host * mmc;		/* MMC core handle */
	struct device * dev;		/* underlying platform device */
	unsigned char id; /* 16xx chips have 2 MMC blocks */
	struct clk * iclk;	/* interface clock (acquired on 24xx only) */
	struct clk * fclk;	/* functional clock, gated per transfer */
	void __iomem *base;	/* ioremapped register base */
	int irq;		/* controller interrupt line */
	unsigned char bus_mode;		/* MMC_BUSMODE_* requested by the core */
	unsigned char hw_bus_mode;	/* bus mode last programmed */

	/* PIO bookkeeping */
	unsigned int sg_len;	/* number of scatterlist entries in use */
	int sg_idx;		/* index of the current scatterlist entry */
	u16 * buffer;		/* CPU pointer into the current segment */
	u32 buffer_bytes_left;	/* bytes left in the current segment */
	u32 total_bytes_left;	/* bytes left in the whole transfer */

	unsigned use_dma:1;	/* prefer DMA over PIO */
	unsigned brs_received:1, dma_done:1;	/* end-of-data / DMA-complete flags */
	unsigned dma_is_read:1;	/* direction of the cached DMA channel */
	unsigned dma_in_use:1;	/* current transfer uses DMA */
	int dma_ch;		/* cached DMA channel, -1 when none held */
	spinlock_t dma_lock;	/* protects brs_received and dma_done */
	struct timer_list dma_timer;	/* lazy DMA channel release */
	unsigned dma_len;	/* byte count of the current DMA segment */

	short power_pin;	/* slot power GPIO, negative when absent */
	short wp_pin;		/* write-protect GPIO, negative when absent */

	int switch_pin;		/* cover switch GPIO, negative when absent */
	struct work_struct switch_work;	/* cover-change bottom half */
	struct timer_list switch_timer;	/* cover polling timer */
	int switch_last_state;	/* last cover state reported via uevent */
};
91
92 static inline int
93 mmc_omap_cover_is_open(struct mmc_omap_host *host)
94 {
95 if (host->switch_pin < 0)
96 return 0;
97 return omap_get_gpio_datain(host->switch_pin);
98 }
99
100 static ssize_t
101 mmc_omap_show_cover_switch(struct device *dev,
102 struct device_attribute *attr, char *buf)
103 {
104 struct mmc_omap_host *host = dev_get_drvdata(dev);
105
106 return sprintf(buf, "%s\n", mmc_omap_cover_is_open(host) ? "open" :
107 "closed");
108 }
109
/* Read-only sysfs attribute exposing the cover switch state. */
static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
111
/* sysfs "enable_poll" read handler: current cover-polling setting. */
static ssize_t
mmc_omap_show_enable_poll(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", mmc_omap_enable_poll);
}
118
119 static ssize_t
120 mmc_omap_store_enable_poll(struct device *dev,
121 struct device_attribute *attr, const char *buf,
122 size_t size)
123 {
124 int enable_poll;
125
126 if (sscanf(buf, "%10d", &enable_poll) != 1)
127 return -EINVAL;
128
129 if (enable_poll != mmc_omap_enable_poll) {
130 struct mmc_omap_host *host = dev_get_drvdata(dev);
131
132 mmc_omap_enable_poll = enable_poll;
133 if (enable_poll && host->switch_pin >= 0)
134 schedule_work(&host->switch_work);
135 }
136 return size;
137 }
138
/* Read/write sysfs attribute controlling cover-switch polling (mode 0664). */
static DEVICE_ATTR(enable_poll, 0664,
		   mmc_omap_show_enable_poll, mmc_omap_store_enable_poll);
141
/*
 * Program and launch a command on the controller.  Enables the functional
 * clock here; it is released again when the command/data phase completes
 * (mmc_omap_cmd_done() / mmc_omap_xfer_done()).
 */
static void
mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	u32 cmdreg;
	u32 resptype;
	u32 cmdtype;

	host->cmd = cmd;

	resptype = 0;
	cmdtype = 0;

	/* Our hardware needs to know exact type */
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1):
		/* resp 1, resp 1b */
		resptype = 1;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		resptype = 2;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		resptype = 3;
		break;
	default:
		/* no response expected */
		break;
	}

	/* Map the core's command class onto the controller's CMDTYPE field. */
	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
		cmdtype = OMAP_MMC_CMDTYPE_BC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
		cmdtype = OMAP_MMC_CMDTYPE_BCR;
	} else {
		cmdtype = OMAP_MMC_CMDTYPE_AC;
	}

	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

	/* bit 6: open-drain command line */
	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdreg |= 1 << 6;

	/* bit 11: response carries a busy signal */
	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= 1 << 11;

	/* bit 15: data transfer direction is card-to-host */
	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
		cmdreg |= 1 << 15;

	clk_enable(host->fclk);

	OMAP_MMC_WRITE(host->base, CTO, 200);
	OMAP_MMC_WRITE(host->base, ARGL, cmd->arg & 0xffff);
	OMAP_MMC_WRITE(host->base, ARGH, cmd->arg >> 16);
	/* Unmask every status condition the IRQ handler cares about. */
	OMAP_MMC_WRITE(host->base, IE,
		       OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
		       OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
		       OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
		       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
		       OMAP_MMC_STAT_END_OF_DATA);
	/* Writing CMD starts the command. */
	OMAP_MMC_WRITE(host->base, CMD, cmdreg);
}
204
/*
 * Finish the data phase of the current request: tear down the DMA mapping
 * (channel release is deferred via dma_timer), gate the functional clock,
 * then either complete the request or issue the trailing STOP command.
 */
static void
mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use) {
		enum dma_data_direction dma_data_dir;

		BUG_ON(host->dma_ch < 0);
		/* On error the channel may still be running; stop it first. */
		if (data->error != MMC_ERR_NONE)
			omap_stop_dma(host->dma_ch);
		/* Release DMA channel lazily */
		mod_timer(&host->dma_timer, jiffies + HZ);
		if (data->flags & MMC_DATA_WRITE)
			dma_data_dir = DMA_TO_DEVICE;
		else
			dma_data_dir = DMA_FROM_DEVICE;
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
			     dma_data_dir);
	}
	host->data = NULL;
	host->sg_len = 0;
	clk_disable(host->fclk);

	/* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
	 * dozens of requests until the card finishes writing data.
	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
	 */

	if (!data->stop) {
		host->mrq = NULL;
		mmc_request_done(host->mmc, data->mrq);
		return;
	}

	/* Multi-block transfer: terminate with the prepared STOP command. */
	mmc_omap_start_command(host, data->stop);
}
240
241 static void
242 mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
243 {
244 unsigned long flags;
245 int done;
246
247 if (!host->dma_in_use) {
248 mmc_omap_xfer_done(host, data);
249 return;
250 }
251 done = 0;
252 spin_lock_irqsave(&host->dma_lock, flags);
253 if (host->dma_done)
254 done = 1;
255 else
256 host->brs_received = 1;
257 spin_unlock_irqrestore(&host->dma_lock, flags);
258 if (done)
259 mmc_omap_xfer_done(host, data);
260 }
261
/*
 * Lazy DMA channel release.  Armed one second after a transfer completes
 * (see mmc_omap_xfer_done()); frees the channel that was kept around so
 * back-to-back requests in the same direction could reuse it.
 */
static void
mmc_omap_dma_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	BUG_ON(host->dma_ch < 0);
	omap_free_dma(host->dma_ch);
	host->dma_ch = -1;
}
271
272 static void
273 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
274 {
275 unsigned long flags;
276 int done;
277
278 done = 0;
279 spin_lock_irqsave(&host->dma_lock, flags);
280 if (host->brs_received)
281 done = 1;
282 else
283 host->dma_done = 1;
284 spin_unlock_irqrestore(&host->dma_lock, flags);
285 if (done)
286 mmc_omap_xfer_done(host, data);
287 }
288
/*
 * Command phase finished: collect the response from the RSP registers and,
 * when no data phase follows (or the command failed), complete the request.
 */
static void
mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			/* 128-bit response: RSP0..RSP7 hold it low word
			 * first, while resp[0..3] is ordered high to low. */
			cmd->resp[3] =
				OMAP_MMC_READ(host->base, RSP0) |
				(OMAP_MMC_READ(host->base, RSP1) << 16);
			cmd->resp[2] =
				OMAP_MMC_READ(host->base, RSP2) |
				(OMAP_MMC_READ(host->base, RSP3) << 16);
			cmd->resp[1] =
				OMAP_MMC_READ(host->base, RSP4) |
				(OMAP_MMC_READ(host->base, RSP5) << 16);
			cmd->resp[0] =
				OMAP_MMC_READ(host->base, RSP6) |
				(OMAP_MMC_READ(host->base, RSP7) << 16);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] =
				OMAP_MMC_READ(host->base, RSP6) |
				(OMAP_MMC_READ(host->base, RSP7) << 16);
		}
	}

	/* No data phase, or a failed command: the request ends here. */
	if (host->data == NULL || cmd->error != MMC_ERR_NONE) {
		host->mrq = NULL;
		clk_disable(host->fclk);
		mmc_request_done(host->mmc, cmd->mrq);
	}
}
323
324 /* PIO only */
325 static void
326 mmc_omap_sg_to_buf(struct mmc_omap_host *host)
327 {
328 struct scatterlist *sg;
329
330 sg = host->data->sg + host->sg_idx;
331 host->buffer_bytes_left = sg->length;
332 host->buffer = page_address(sg->page) + sg->offset;
333 if (host->buffer_bytes_left > host->total_bytes_left)
334 host->buffer_bytes_left = host->total_bytes_left;
335 }
336
337 /* PIO only */
338 static void
339 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
340 {
341 int n;
342 void __iomem *reg;
343 u16 *p;
344
345 if (host->buffer_bytes_left == 0) {
346 host->sg_idx++;
347 BUG_ON(host->sg_idx == host->sg_len);
348 mmc_omap_sg_to_buf(host);
349 }
350 n = 64;
351 if (n > host->buffer_bytes_left)
352 n = host->buffer_bytes_left;
353 host->buffer_bytes_left -= n;
354 host->total_bytes_left -= n;
355 host->data->bytes_xfered += n;
356
357 if (write) {
358 __raw_writesw(host->base + OMAP_MMC_REG_DATA, host->buffer, n);
359 } else {
360 __raw_readsw(host->base + OMAP_MMC_REG_DATA, host->buffer, n);
361 }
362 }
363
364 static inline void mmc_omap_report_irq(u16 status)
365 {
366 static const char *mmc_omap_status_bits[] = {
367 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
368 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
369 };
370 int i, c = 0;
371
372 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
373 if (status & (1 << i)) {
374 if (c)
375 printk(" ");
376 printk("%s", mmc_omap_status_bits[i]);
377 c++;
378 }
379 }
380
381 static irqreturn_t mmc_omap_irq(int irq, void *dev_id, struct pt_regs *regs)
382 {
383 struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
384 u16 status;
385 int end_command;
386 int end_transfer;
387 int transfer_error;
388
389 if (host->cmd == NULL && host->data == NULL) {
390 status = OMAP_MMC_READ(host->base, STAT);
391 dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status);
392 if (status != 0) {
393 OMAP_MMC_WRITE(host->base, STAT, status);
394 OMAP_MMC_WRITE(host->base, IE, 0);
395 }
396 return IRQ_HANDLED;
397 }
398
399 end_command = 0;
400 end_transfer = 0;
401 transfer_error = 0;
402
403 while ((status = OMAP_MMC_READ(host->base, STAT)) != 0) {
404 OMAP_MMC_WRITE(host->base, STAT, status);
405 #ifdef CONFIG_MMC_DEBUG
406 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
407 status, host->cmd != NULL ? host->cmd->opcode : -1);
408 mmc_omap_report_irq(status);
409 printk("\n");
410 #endif
411 if (host->total_bytes_left) {
412 if ((status & OMAP_MMC_STAT_A_FULL) ||
413 (status & OMAP_MMC_STAT_END_OF_DATA))
414 mmc_omap_xfer_data(host, 0);
415 if (status & OMAP_MMC_STAT_A_EMPTY)
416 mmc_omap_xfer_data(host, 1);
417 }
418
419 if (status & OMAP_MMC_STAT_END_OF_DATA) {
420 end_transfer = 1;
421 }
422
423 if (status & OMAP_MMC_STAT_DATA_TOUT) {
424 dev_dbg(mmc_dev(host->mmc), "data timeout\n");
425 if (host->data) {
426 host->data->error |= MMC_ERR_TIMEOUT;
427 transfer_error = 1;
428 }
429 }
430
431 if (status & OMAP_MMC_STAT_DATA_CRC) {
432 if (host->data) {
433 host->data->error |= MMC_ERR_BADCRC;
434 dev_dbg(mmc_dev(host->mmc),
435 "data CRC error, bytes left %d\n",
436 host->total_bytes_left);
437 transfer_error = 1;
438 } else {
439 dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
440 }
441 }
442
443 if (status & OMAP_MMC_STAT_CMD_TOUT) {
444 /* Timeouts are routine with some commands */
445 if (host->cmd) {
446 if (host->cmd->opcode != MMC_ALL_SEND_CID &&
447 host->cmd->opcode !=
448 MMC_SEND_OP_COND &&
449 host->cmd->opcode !=
450 MMC_APP_CMD &&
451 !mmc_omap_cover_is_open(host))
452 dev_err(mmc_dev(host->mmc),
453 "command timeout, CMD %d\n",
454 host->cmd->opcode);
455 host->cmd->error = MMC_ERR_TIMEOUT;
456 end_command = 1;
457 }
458 }
459
460 if (status & OMAP_MMC_STAT_CMD_CRC) {
461 if (host->cmd) {
462 dev_err(mmc_dev(host->mmc),
463 "command CRC error (CMD%d, arg 0x%08x)\n",
464 host->cmd->opcode, host->cmd->arg);
465 host->cmd->error = MMC_ERR_BADCRC;
466 end_command = 1;
467 } else
468 dev_err(mmc_dev(host->mmc),
469 "command CRC error without cmd?\n");
470 }
471
472 if (status & OMAP_MMC_STAT_CARD_ERR) {
473 if (host->cmd && host->cmd->opcode == MMC_STOP_TRANSMISSION) {
474 u32 response = OMAP_MMC_READ(host->base, RSP6)
475 | (OMAP_MMC_READ(host->base, RSP7) << 16);
476 /* STOP sometimes sets must-ignore bits */
477 if (!(response & (R1_CC_ERROR
478 | R1_ILLEGAL_COMMAND
479 | R1_COM_CRC_ERROR))) {
480 end_command = 1;
481 continue;
482 }
483 }
484
485 dev_dbg(mmc_dev(host->mmc), "card status error (CMD%d)\n",
486 host->cmd->opcode);
487 if (host->cmd) {
488 host->cmd->error = MMC_ERR_FAILED;
489 end_command = 1;
490 }
491 if (host->data) {
492 host->data->error = MMC_ERR_FAILED;
493 transfer_error = 1;
494 }
495 }
496
497 /*
498 * NOTE: On 1610 the END_OF_CMD may come too early when
499 * starting a write
500 */
501 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
502 (!(status & OMAP_MMC_STAT_A_EMPTY))) {
503 end_command = 1;
504 }
505 }
506
507 if (end_command) {
508 mmc_omap_cmd_done(host, host->cmd);
509 }
510 if (transfer_error)
511 mmc_omap_xfer_done(host, host->data);
512 else if (end_transfer)
513 mmc_omap_end_of_data(host, host->data);
514
515 return IRQ_HANDLED;
516 }
517
/* Cover-switch GPIO interrupt: defer all work to the workqueue handler. */
static irqreturn_t mmc_omap_switch_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) dev_id;

	schedule_work(&host->switch_work);

	return IRQ_HANDLED;
}
526
/* Cover polling timer: reschedule the switch worker (timer context, so
 * the real work must run from process context). */
static void mmc_omap_switch_timer(unsigned long arg)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) arg;

	schedule_work(&host->switch_work);
}
533
/* FIXME: Handle card insertion and removal properly. Maybe use a mask
 * for MMC state? */
/* Intentionally empty.  NOTE(review): no caller is visible in this file —
 * confirm where (or whether) this callback is still registered. */
static void mmc_omap_switch_callback(unsigned long data, u8 mmc_mask)
{
}
539
/*
 * Workqueue handler for cover-switch changes: sends a uevent when the
 * cover state changed, asks the MMC core to rescan the slot, and rearms
 * the polling timer for as long as the cover stays open.
 */
static void mmc_omap_switch_handler(void *data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
	struct mmc_card *card;
	static int complained = 0;	/* rate-limit the "cover is open" message */
	int cards = 0, cover_open;

	if (host->switch_pin == -1)
		return;
	cover_open = mmc_omap_cover_is_open(host);
	if (cover_open != host->switch_last_state) {
		kobject_uevent(&host->dev->kobj, KOBJ_CHANGE);
		host->switch_last_state = cover_open;
	}
	mmc_detect_change(host->mmc, 0);
	/* NOTE(review): "cards" is counted but never consumed below —
	 * confirm whether this loop can be dropped. */
	list_for_each_entry(card, &host->mmc->cards, node) {
		if (mmc_card_present(card))
			cards++;
	}
	if (mmc_omap_cover_is_open(host)) {
		if (!complained) {
			dev_info(mmc_dev(host->mmc), "cover is open");
			complained = 1;
		}
		if (mmc_omap_enable_poll)
			mod_timer(&host->switch_timer, jiffies +
				  msecs_to_jiffies(OMAP_MMC_SWITCH_POLL_DELAY));
	} else {
		complained = 0;
	}
}
571
/* Prepare to transfer the next segment of a scatterlist */
static void
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
	int dma_ch = host->dma_ch;
	unsigned long data_addr;
	u16 buf, frame;
	u32 count;
	struct scatterlist *sg = &data->sg[host->sg_idx];
	int src_port = 0;
	int dst_port = 0;
	int sync_dev = 0;

	/* Physical address of the controller DATA register (DMA endpoint). */
	data_addr = io_v2p((u32) host->base) + OMAP_MMC_REG_DATA;
	frame = data->blksz;
	count = sg_dma_len(sg);

	/* The core sometimes maps a multi-block scatterlist for a
	 * single-block transfer; clamp to one block in that case. */
	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	host->dma_len = count;

	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
	 * Use 16 or 32 word frames when the blocksize is at least that large.
	 * Blocksize is usually 512 bytes; but not for some SD reads.
	 */
	if (cpu_is_omap15xx() && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;
	count /= frame;		/* number of DMA frames */
	frame >>= 1;		/* bytes -> 16-bit words per frame */

	if (!(data->flags & MMC_DATA_WRITE)) {
		/* Read: constant source (DATA reg), post-inc destination. */
		buf = 0x800f | ((frame - 1) << 8);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_TIPB;
			dst_port = OMAP_DMA_PORT_EMIFF;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_RX;

		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_CONSTANT,
					data_addr, 0, 0);
		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_POST_INC,
					 sg_dma_address(sg), 0, 0);
		omap_set_dma_dest_data_pack(dma_ch, 1);
		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	} else {
		/* Write: post-inc source, constant destination (DATA reg). */
		buf = 0x0f80 | ((frame - 1) << 0);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_EMIFF;
			dst_port = OMAP_DMA_PORT_TIPB;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_TX;

		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_CONSTANT,
					 data_addr, 0, 0);
		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_POST_INC,
					sg_dma_address(sg), 0, 0);
		omap_set_dma_src_data_pack(dma_ch, 1);
		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	}

	/* Max limit for DMA frame count is 0xffff */
	if (unlikely(count > 0xffff))
		BUG();

	OMAP_MMC_WRITE(host->base, BUF, buf);
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
				     frame, count, OMAP_DMA_SYNC_FRAME,
				     sync_dev, 0);
}
652
653 /* A scatterlist segment completed */
654 static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
655 {
656 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
657 struct mmc_data *mmcdat = host->data;
658
659 if (unlikely(host->dma_ch < 0)) {
660 dev_err(mmc_dev(host->mmc), "DMA callback while DMA not
661 enabled\n");
662 return;
663 }
664 /* FIXME: We really should do something to _handle_ the errors */
665 if (ch_status & OMAP_DMA_TOUT_IRQ) {
666 dev_err(mmc_dev(host->mmc),"DMA timeout\n");
667 return;
668 }
669 if (ch_status & OMAP_DMA_DROP_IRQ) {
670 dev_err(mmc_dev(host->mmc), "DMA sync error\n");
671 return;
672 }
673 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
674 return;
675 }
676 mmcdat->bytes_xfered += host->dma_len;
677 host->sg_idx++;
678 if (host->sg_idx < host->sg_len) {
679 mmc_omap_prepare_dma(host, host->data);
680 omap_start_dma(host->dma_ch);
681 } else
682 mmc_omap_dma_done(host, host->data);
683 }
684
685 static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
686 {
687 const char *dev_name;
688 int sync_dev, dma_ch, is_read, r;
689
690 is_read = !(data->flags & MMC_DATA_WRITE);
691 del_timer_sync(&host->dma_timer);
692 if (host->dma_ch >= 0) {
693 if (is_read == host->dma_is_read)
694 return 0;
695 omap_free_dma(host->dma_ch);
696 host->dma_ch = -1;
697 }
698
699 if (is_read) {
700 if (host->id == 1) {
701 sync_dev = OMAP_DMA_MMC_RX;
702 dev_name = "MMC1 read";
703 } else {
704 sync_dev = OMAP_DMA_MMC2_RX;
705 dev_name = "MMC2 read";
706 }
707 } else {
708 if (host->id == 1) {
709 sync_dev = OMAP_DMA_MMC_TX;
710 dev_name = "MMC1 write";
711 } else {
712 sync_dev = OMAP_DMA_MMC2_TX;
713 dev_name = "MMC2 write";
714 }
715 }
716 r = omap_request_dma(sync_dev, dev_name, mmc_omap_dma_cb,
717 host, &dma_ch);
718 if (r != 0) {
719 dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
720 return r;
721 }
722 host->dma_ch = dma_ch;
723 host->dma_is_read = is_read;
724
725 return 0;
726 }
727
/*
 * Program the command-phase timeout: clear the SDIO timeout-multiplier
 * bit and write the maximum value into CTO.
 */
static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
{
	u16 reg;

	reg = OMAP_MMC_READ(host->base, SDIO);
	reg &= ~(1 << 5);	/* disable the x1024 timeout multiplier */
	OMAP_MMC_WRITE(host->base, SDIO, reg);
	/* Set maximum timeout */
	OMAP_MMC_WRITE(host->base, CTO, 0xff);
}
738
739 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
740 {
741 int timeout;
742 u16 reg;
743
744 /* Convert ns to clock cycles by assuming 20MHz frequency
745 * 1 cycle at 20MHz = 500 ns
746 */
747 timeout = req->data->timeout_clks + req->data->timeout_ns / 500;
748
749 /* Check if we need to use timeout multiplier register */
750 reg = OMAP_MMC_READ(host->base, SDIO);
751 if (timeout > 0xffff) {
752 reg |= (1 << 5);
753 timeout /= 1024;
754 } else
755 reg &= ~(1 << 5);
756 OMAP_MMC_WRITE(host->base, SDIO, reg);
757 OMAP_MMC_WRITE(host->base, DTO, timeout);
758 }
759
/*
 * Set up the controller for the data phase of @req: block count/length,
 * timeouts, and either DMA (preferred) or PIO transfer state.  With no
 * data phase, clears the transfer registers and programs the command
 * timeout instead.
 */
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		OMAP_MMC_WRITE(host->base, BLEN, 0);
		OMAP_MMC_WRITE(host->base, NBLK, 0);
		OMAP_MMC_WRITE(host->base, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}


	block_size = data->blksz;

	/* NBLK/BLEN are programmed as value-minus-one. */
	OMAP_MMC_WRITE(host->base, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host->base, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		if (mmc_omap_get_dma_channel(host, data) == 0) {
			enum dma_data_direction dma_data_dir;

			if (data->flags & MMC_DATA_WRITE)
				dma_data_dir = DMA_TO_DEVICE;
			else
				dma_data_dir = DMA_FROM_DEVICE;

			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
						  sg_len, dma_data_dir);
			/* DMA path: the PIO byte counter stays at zero so
			 * the IRQ handler never touches the FIFO. */
			host->total_bytes_left = 0;
			mmc_omap_prepare_dma(host, req->data);
			host->brs_received = 0;
			host->dma_done = 0;
			host->dma_in_use = 1;
		} else
			use_dma = 0;	/* no channel: fall back to PIO */
	}

	/* Revert to PIO? */
	if (!use_dma) {
		OMAP_MMC_WRITE(host->base, BUF, 0x1f1f);
		host->total_bytes_left = data->blocks * block_size;
		host->sg_len = sg_len;
		mmc_omap_sg_to_buf(host);
		host->dma_in_use = 0;
	}
}
830
/*
 * mmc_host_ops.request: program the data phase first, then launch the
 * command; DMA is started only after the controller has been set up.
 */
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_omap_host *host = mmc_priv(mmc);

	/* The core must never issue overlapping requests. */
	WARN_ON(host->mrq != NULL);

	host->mrq = req;

	/* only touch fifo AFTER the controller readies it */
	mmc_omap_prepare_data(host, req);
	mmc_omap_start_command(host, req->cmd);
	if (host->dma_in_use)
		omap_start_dma(host->dma_ch);
}
845
/*
 * Innovator boards gate MMC socket power via bit 3 of the FPGA power
 * register; this function compiles to a no-op on other configurations.
 */
static void innovator_fpga_socket_power(int on)
{
#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)

	if (on) {
		fpga_write(fpga_read(OMAP1510_FPGA_POWER) | (1 << 3),
			   OMAP1510_FPGA_POWER);
	} else {
		fpga_write(fpga_read(OMAP1510_FPGA_POWER) & ~(1 << 3),
			   OMAP1510_FPGA_POWER);
	}
#endif
}
859
/*
 * Turn the socket power on/off. Innovator uses FPGA, most boards
 * probably use GPIO.
 */
static void mmc_omap_power(struct mmc_omap_host *host, int on)
{
	if (on) {
		/* Board-specific power paths, in fixed precedence order. */
		if (machine_is_omap_innovator())
			innovator_fpga_socket_power(1);
		else if (machine_is_omap_h2())
			tps65010_set_gpio_out_value(GPIO3, HIGH);
		else if (machine_is_omap_h3())
			/* GPIO 4 of TPS65010 sends SD_EN signal */
			tps65010_set_gpio_out_value(GPIO4, HIGH);
		else if (cpu_is_omap24xx()) {
			/* 24xx: power is bit 11 of the CON register. */
			u16 reg = OMAP_MMC_READ(host->base, CON);
			OMAP_MMC_WRITE(host->base, CON, reg | (1 << 11));
		} else
			if (host->power_pin >= 0)
				omap_set_gpio_dataout(host->power_pin, 1);
	} else {
		if (machine_is_omap_innovator())
			innovator_fpga_socket_power(0);
		else if (machine_is_omap_h2())
			tps65010_set_gpio_out_value(GPIO3, LOW);
		else if (machine_is_omap_h3())
			tps65010_set_gpio_out_value(GPIO4, LOW);
		else if (cpu_is_omap24xx()) {
			u16 reg = OMAP_MMC_READ(host->base, CON);
			OMAP_MMC_WRITE(host->base, CON, reg & ~(1 << 11));
		} else
			if (host->power_pin >= 0)
				omap_set_gpio_dataout(host->power_pin, 0);
	}
}
895
/*
 * mmc_host_ops.set_ios: program clock divisor, bus width, power state
 * and remember the requested bus mode for command programming.
 */
static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_omap_host *host = mmc_priv(mmc);
	int dsor;
	int realclock, i;

	realclock = ios->clock;

	if (ios->clock == 0)
		dsor = 0;
	else {
		int func_clk_rate = clk_get_rate(host->fclk);

		/* Smallest divisor whose output does not exceed the
		 * requested rate. */
		dsor = func_clk_rate / realclock;
		if (dsor < 1)
			dsor = 1;

		if (func_clk_rate / dsor > realclock)
			dsor++;

		/* NOTE(review): the unconditional dsor++ after the cap is
		 * inherited behavior — confirm against the TRM's divisor
		 * field encoding before changing. */
		if (dsor > 250)
			dsor = 250;
		dsor++;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			dsor |= 1 << 15;	/* 4-bit bus width bit */
	}

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		mmc_omap_power(host, 0);
		break;
	case MMC_POWER_UP:
	case MMC_POWER_ON:
		mmc_omap_power(host, 1);
		dsor |= 1<<11;	/* POW bit */
		break;
	}

	host->bus_mode = ios->bus_mode;
	host->hw_bus_mode = host->bus_mode;

	clk_enable(host->fclk);

	/* On insanely high arm_per frequencies something sometimes
	 * goes somehow out of sync, and the POW bit is not being set,
	 * which results in the while loop below getting stuck.
	 * Writing to the CON register twice seems to do the trick. */
	for (i = 0; i < 2; i++)
		OMAP_MMC_WRITE(host->base, CON, dsor);
	if (ios->power_mode == MMC_POWER_UP) {
		/* Send clock cycles, poll completion */
		OMAP_MMC_WRITE(host->base, IE, 0);
		OMAP_MMC_WRITE(host->base, STAT, 0xffff);
		OMAP_MMC_WRITE(host->base, CMD, 1<<7);
		/* NOTE(review): unbounded busy-wait — hangs if the
		 * controller never sets STAT bit 0; consider a timeout. */
		while (0 == (OMAP_MMC_READ(host->base, STAT) & 1));
		OMAP_MMC_WRITE(host->base, STAT, 1);
	}
	clk_disable(host->fclk);
}
956
957 static int mmc_omap_get_ro(struct mmc_host *mmc)
958 {
959 struct mmc_omap_host *host = mmc_priv(mmc);
960
961 return host->wp_pin && omap_get_gpio_datain(host->wp_pin);
962 }
963
/* Host operations handed to the MMC core. */
static struct mmc_host_ops mmc_omap_ops = {
	.request = mmc_omap_request,
	.set_ios = mmc_omap_set_ios,
	.get_ro = mmc_omap_get_ro,
};
969
970 static int __init mmc_omap_probe(struct platform_device *pdev)
971 {
972 struct omap_mmc_conf *minfo = pdev->dev.platform_data;
973 struct mmc_host *mmc;
974 struct mmc_omap_host *host = NULL;
975 int ret = 0;
976
977 if (platform_get_resource(pdev, IORESOURCE_MEM, 0) ||
978 platform_get_irq(pdev, IORESOURCE_IRQ, 0)) {
979 dev_err(&pdev->dev, "mmc_omap_probe: invalid resource type\n");
980 return -ENODEV;
981 }
982
983 if (!request_mem_region(pdev->resource[0].start,
984 pdev->resource[0].end - pdev->resource[0].start + 1,
985 pdev->name)) {
986 dev_dbg(&pdev->dev, "request_mem_region failed\n");
987 return -EBUSY;
988 }
989
990 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
991 if (!mmc) {
992 ret = -ENOMEM;
993 goto out;
994 }
995
996 host = mmc_priv(mmc);
997 host->mmc = mmc;
998
999 spin_lock_init(&host->dma_lock);
1000 init_timer(&host->dma_timer);
1001 host->dma_timer.function = mmc_omap_dma_timer;
1002 host->dma_timer.data = (unsigned long) host;
1003
1004 host->id = pdev->id;
1005
1006 if (cpu_is_omap24xx()) {
1007 host->iclk = clk_get(&pdev->dev, "mmc_ick");
1008 if (IS_ERR(host->iclk))
1009 goto out;
1010 clk_enable(host->iclk);
1011 }
1012
1013 if (!cpu_is_omap24xx())
1014 host->fclk = clk_get(&pdev->dev, "mmc_ck");
1015 else
1016 host->fclk = clk_get(&pdev->dev, "mmc_fck");
1017
1018 if (IS_ERR(host->fclk)) {
1019 ret = PTR_ERR(host->fclk);
1020 goto out;
1021 }
1022
1023 /* REVISIT:
1024 * Also, use minfo->cover to decide how to manage
1025 * the card detect sensing.
1026 */
1027 host->power_pin = minfo->power_pin;
1028 host->switch_pin = minfo->switch_pin;
1029 host->wp_pin = minfo->wp_pin;
1030 host->use_dma = 1;
1031 host->dma_ch = -1;
1032
1033 host->irq = pdev->resource[1].start;
1034 host->base = ioremap(pdev->res.start, SZ_4K);
1035 if (!host->base) {
1036 ret = -ENOMEM;
1037 goto out;
1038 }
1039
1040 if (minfo->wire4)
1041 mmc->caps |= MMC_CAP_4_BIT_DATA;
1042
1043 mmc->ops = &mmc_omap_ops;
1044 mmc->f_min = 400000;
1045 mmc->f_max = 24000000;
1046 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1047
1048 /* Use scatterlist DMA to reduce per-transfer costs.
1049 * NOTE max_seg_size assumption that small blocks aren't
1050 * normally used (except e.g. for reading SD registers).
1051 */
1052 mmc->max_phys_segs = 32;
1053 mmc->max_hw_segs = 32;
1054 mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */
1055 mmc->max_seg_size = mmc->max_sectors * 512;
1056
1057 if (host->power_pin >= 0) {
1058 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
1059 dev_err(mmc_dev(host->mmc), "Unable to get GPIO
1060 pin for MMC power\n");
1061 goto out;
1062 }
1063 omap_set_gpio_direction(host->power_pin, 0);
1064 }
1065
1066 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1067 if (ret)
1068 goto out;
1069
1070 host->dev = &pdev->dev;
1071 platform_set_drvdata(pdev, host);
1072
1073 mmc_add_host(mmc);
1074
1075 if (host->switch_pin >= 0) {
1076 INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host);
1077 init_timer(&host->switch_timer);
1078 host->switch_timer.function = mmc_omap_switch_timer;
1079 host->switch_timer.data = (unsigned long) host;
1080 if (omap_request_gpio(host->switch_pin) != 0) {
1081 dev_warn(mmc_dev(host->mmc), "Unable to get GPIO pin for MMC cover switch\n");
1082 host->switch_pin = -1;
1083 goto no_switch;
1084 }
1085
1086 omap_set_gpio_direction(host->switch_pin, 1);
1087 ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin),
1088 mmc_omap_switch_irq, SA_TRIGGER_RISING, DRIVER_NAME, host);
1089 if (ret) {
1090 dev_warn(mmc_dev(host->mmc), "Unable to get IRQ for MMC cover switch\n");
1091 omap_free_gpio(host->switch_pin);
1092 host->switch_pin = -1;
1093 goto no_switch;
1094 }
1095 ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
1096 if (ret == 0) {
1097 ret = device_create_file(&pdev->dev, &dev_attr_enable_poll);
1098 if (ret != 0)
1099 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1100 }
1101 if (ret) {
1102 dev_wan(mmc_dev(host->mmc), "Unable to create sysfs attributes\n");
1103 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1104 omap_free_gpio(host->switch_pin);
1105 host->switch_pin = -1;
1106 goto no_switch;
1107 }
1108 if (mmc_omap_enable_poll && mmc_omap_cover_is_open(host))
1109 schedule_work(&host->switch_work);
1110 }
1111
1112 no_switch:
1113 return 0;
1114
1115 out:
1116 /* FIXME: Free other resources too. */
1117 if (host) {
1118 if (host->iclk && !IS_ERR(host->iclk))
1119 clk_put(host->iclk);
1120 if (host->fclk && !IS_ERR(host->fclk))
1121 clk_put(host->fclk);
1122 mmc_free_host(host->mmc);
1123 }
1124 return ret;
1125 }
1126
/*
 * Device teardown: unregister from the MMC core, release IRQs, GPIOs,
 * sysfs attributes, clocks, the host structure and the memory region.
 * NOTE(review): host->base from ioremap() is never iounmap()ed here —
 * confirm and fix together with the probe error paths.
 */
static int mmc_omap_remove(struct platform_device *pdev)
{
	struct mmc_omap_host *host = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (host) {
		mmc_remove_host(host->mmc);
		free_irq(host->irq, host);

		if (host->power_pin >= 0)
			omap_free_gpio(host->power_pin);
		if (host->switch_pin >= 0) {
			device_remove_file(&pdev->dev, &dev_attr_enable_poll);
			device_remove_file(&pdev->dev, &dev_attr_cover_switch);
			free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
			omap_free_gpio(host->switch_pin);
			host->switch_pin = -1;
			/* Make sure no polling timer or queued work survives. */
			del_timer_sync(&host->switch_timer);
			flush_scheduled_work();
		}
		if (host->iclk && !IS_ERR(host->iclk))
			clk_put(host->iclk);
		if (host->fclk && !IS_ERR(host->fclk))
			clk_put(host->fclk);
		mmc_free_host(host->mmc);
	}

	release_mem_region(pdev->resource[0].start,
			   pdev->resource[0].end - pdev->resource[0].start + 1);

	return 0;
}
1160
1161 #ifdef CONFIG_PM
1162 static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1163 {
1164 int ret = 0;
1165 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1166
1167 if (host && host->suspended)
1168 return 0;
1169
1170 if (host) {
1171 ret = mmc_suspend_host(host->mmc, mesg);
1172 if (ret == 0)
1173 host->suspended = 1;
1174 }
1175 return ret;
1176 }
1177
1178 static int mmc_omap_resume(struct platform_device *pdev)
1179 {
1180 int ret = 0;
1181 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1182
1183 if (host && !host->suspended)
1184 return 0;
1185
1186 if (host) {
1187 ret = mmc_resume_host(host->mmc);
1188 if (ret == 0)
1189 host->suspended = 0;
1190 }
1191
1192 return ret;
1193 }
1194 #else
1195 #define mmc_omap_suspend NULL
1196 #define mmc_omap_resume NULL
1197 #endif
1198
/* Platform driver glue; suspend/resume are NULL without CONFIG_PM. */
static struct platform_driver mmc_omap_driver = {
	.probe = mmc_omap_probe,
	.remove = mmc_omap_remove,
	.suspend = mmc_omap_suspend,
	.resume = mmc_omap_resume,
	.driver = {
		.name = DRIVER_NAME,
	},
};
1208
/* Module entry point: register the platform driver. */
static int __init mmc_omap_init(void)
{
	return platform_driver_register(&mmc_omap_driver);
}
1213
/* Module exit point: unregister the platform driver. */
static void __exit mmc_omap_exit(void)
{
	platform_driver_unregister(&mmc_omap_driver);
}
1218
module_init(mmc_omap_init);
module_exit(mmc_omap_exit);

/* Module metadata. */
MODULE_DESCRIPTION("OMAP Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS(DRIVER_NAME);
MODULE_AUTHOR("Juha Yrjölä");