]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/mmc/host/omap.c
mmc: omap: add DMA engine support
[mirror_ubuntu-artful-kernel.git] / drivers / mmc / host / omap.c
CommitLineData
730c9b7e 1/*
70f10482 2 * linux/drivers/mmc/host/omap.c
730c9b7e
CA
3 *
4 * Copyright (C) 2004 Nokia Corporation
d36b6910 5 * Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com>
730c9b7e
CA
6 * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7 * Other hacks (DMA, SD, etc) by David Brownell
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
730c9b7e
CA
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
17#include <linux/ioport.h>
18#include <linux/platform_device.h>
19#include <linux/interrupt.h>
3451c067 20#include <linux/dmaengine.h>
730c9b7e
CA
21#include <linux/dma-mapping.h>
22#include <linux/delay.h>
23#include <linux/spinlock.h>
24#include <linux/timer.h>
3451c067 25#include <linux/omap-dma.h>
730c9b7e 26#include <linux/mmc/host.h>
730c9b7e
CA
27#include <linux/mmc/card.h>
28#include <linux/clk.h>
45711f1a 29#include <linux/scatterlist.h>
6d16bfb5 30#include <linux/i2c/tps65010.h>
5a0e3ad6 31#include <linux/slab.h>
730c9b7e
CA
32
33#include <asm/io.h>
34#include <asm/irq.h>
730c9b7e 35
ce491cf8
TL
36#include <plat/board.h>
37#include <plat/mmc.h>
1bc857f7 38#include <asm/gpio.h>
ce491cf8
TL
39#include <plat/dma.h>
40#include <plat/mux.h>
41#include <plat/fpga.h>
730c9b7e 42
/*
 * MMC controller register offsets, in 16-bit register units.
 * The real byte offset is obtained via OMAP_MMC_REG(), which shifts
 * by host->reg_shift to account for different register strides.
 */
#define OMAP_MMC_REG_CMD	0x00
#define OMAP_MMC_REG_ARGL	0x01
#define OMAP_MMC_REG_ARGH	0x02
#define OMAP_MMC_REG_CON	0x03
#define OMAP_MMC_REG_STAT	0x04
#define OMAP_MMC_REG_IE		0x05
#define OMAP_MMC_REG_CTO	0x06
#define OMAP_MMC_REG_DTO	0x07
#define OMAP_MMC_REG_DATA	0x08
#define OMAP_MMC_REG_BLEN	0x09
#define OMAP_MMC_REG_NBLK	0x0a
#define OMAP_MMC_REG_BUF	0x0b
#define OMAP_MMC_REG_SDIO	0x0d
#define OMAP_MMC_REG_REV	0x0f
#define OMAP_MMC_REG_RSP0	0x10
#define OMAP_MMC_REG_RSP1	0x11
#define OMAP_MMC_REG_RSP2	0x12
#define OMAP_MMC_REG_RSP3	0x13
#define OMAP_MMC_REG_RSP4	0x14
#define OMAP_MMC_REG_RSP5	0x15
#define OMAP_MMC_REG_RSP6	0x16
#define OMAP_MMC_REG_RSP7	0x17
#define OMAP_MMC_REG_IOSR	0x18
#define OMAP_MMC_REG_SYSC	0x19
#define OMAP_MMC_REG_SYSS	0x1a

/* Bits of the STAT register (also used for the IE interrupt mask) */
#define OMAP_MMC_STAT_CARD_ERR		(1 << 14)
#define OMAP_MMC_STAT_CARD_IRQ		(1 << 13)
#define OMAP_MMC_STAT_OCR_BUSY		(1 << 12)
#define OMAP_MMC_STAT_A_EMPTY		(1 << 11)
#define OMAP_MMC_STAT_A_FULL		(1 << 10)
#define OMAP_MMC_STAT_CMD_CRC		(1 <<  8)
#define OMAP_MMC_STAT_CMD_TOUT		(1 <<  7)
#define OMAP_MMC_STAT_DATA_CRC		(1 <<  6)
#define OMAP_MMC_STAT_DATA_TOUT		(1 <<  5)
#define OMAP_MMC_STAT_END_BUSY		(1 <<  4)
#define OMAP_MMC_STAT_END_OF_DATA	(1 <<  3)
#define OMAP_MMC_STAT_CARD_BUSY		(1 <<  2)
#define OMAP_MMC_STAT_END_OF_CMD	(1 <<  0)

/* Register accessors; 16-bit wide registers, stride set per SoC */
#define OMAP_MMC_REG(host, reg)		(OMAP_MMC_REG_##reg << (host)->reg_shift)
#define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
#define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))

/*
 * Command types
 */
#define OMAP_MMC_CMDTYPE_BC	0
#define OMAP_MMC_CMDTYPE_BCR	1
#define OMAP_MMC_CMDTYPE_AC	2
#define OMAP_MMC_CMDTYPE_ADTC	3


#define DRIVER_NAME "mmci-omap"

/* Specifies how often in millisecs to poll for card status changes
 * when the cover switch is open */
#define OMAP_MMC_COVER_POLL_DELAY	500
730c9b7e 101
abfbe5f7
JY
102struct mmc_omap_host;
103
3451c067
RK
104#define USE_DMA_PRIVATE
105
abfbe5f7
JY
106struct mmc_omap_slot {
107 int id;
108 unsigned int vdd;
109 u16 saved_con;
110 u16 bus_mode;
111 unsigned int fclk_freq;
112 unsigned powered:1;
113
7584d276
JL
114 struct tasklet_struct cover_tasklet;
115 struct timer_list cover_timer;
5a0f3f1f
JY
116 unsigned cover_open;
117
abfbe5f7
JY
118 struct mmc_request *mrq;
119 struct mmc_omap_host *host;
120 struct mmc_host *mmc;
121 struct omap_mmc_slot_data *pdata;
122};
123
730c9b7e
CA
124struct mmc_omap_host {
125 int initialized;
126 int suspended;
127 struct mmc_request * mrq;
128 struct mmc_command * cmd;
129 struct mmc_data * data;
130 struct mmc_host * mmc;
131 struct device * dev;
132 unsigned char id; /* 16xx chips have 2 MMC blocks */
133 struct clk * iclk;
134 struct clk * fclk;
3451c067
RK
135 struct dma_chan *dma_rx;
136 u32 dma_rx_burst;
137 struct dma_chan *dma_tx;
138 u32 dma_tx_burst;
89783b1e
JY
139 struct resource *mem_res;
140 void __iomem *virt_base;
141 unsigned int phys_base;
730c9b7e
CA
142 int irq;
143 unsigned char bus_mode;
144 unsigned char hw_bus_mode;
0e950fa6 145 unsigned int reg_shift;
730c9b7e 146
0fb4723d
JL
147 struct work_struct cmd_abort_work;
148 unsigned abort:1;
149 struct timer_list cmd_abort_timer;
eb1860bc 150
0f602ec7
JL
151 struct work_struct slot_release_work;
152 struct mmc_omap_slot *next_slot;
153 struct work_struct send_stop_work;
154 struct mmc_data *stop_data;
155
730c9b7e
CA
156 unsigned int sg_len;
157 int sg_idx;
158 u16 * buffer;
159 u32 buffer_bytes_left;
160 u32 total_bytes_left;
161
162 unsigned use_dma:1;
163 unsigned brs_received:1, dma_done:1;
730c9b7e 164 unsigned dma_in_use:1;
3451c067
RK
165#ifdef USE_DMA_PRIVATE
166 unsigned dma_is_read:1;
730c9b7e 167 int dma_ch;
730c9b7e
CA
168 struct timer_list dma_timer;
169 unsigned dma_len;
3451c067
RK
170#endif
171 spinlock_t dma_lock;
730c9b7e 172
abfbe5f7
JY
173 struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS];
174 struct mmc_omap_slot *current_slot;
175 spinlock_t slot_lock;
176 wait_queue_head_t slot_wq;
177 int nr_slots;
178
0807a9b5
JL
179 struct timer_list clk_timer;
180 spinlock_t clk_lock; /* for changing enabled state */
181 unsigned int fclk_enabled:1;
b01a4f1c 182 struct workqueue_struct *mmc_omap_wq;
0807a9b5 183
abfbe5f7 184 struct omap_mmc_platform_data *pdata;
730c9b7e
CA
185};
186
0d9ee5b2 187
7c8ad982 188static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
0807a9b5
JL
189{
190 unsigned long tick_ns;
191
192 if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
193 tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq;
194 ndelay(8 * tick_ns);
195 }
196}
197
7c8ad982 198static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
0807a9b5
JL
199{
200 unsigned long flags;
201
202 spin_lock_irqsave(&host->clk_lock, flags);
203 if (host->fclk_enabled != enable) {
204 host->fclk_enabled = enable;
205 if (enable)
206 clk_enable(host->fclk);
207 else
208 clk_disable(host->fclk);
209 }
210 spin_unlock_irqrestore(&host->clk_lock, flags);
211}
212
abfbe5f7
JY
213static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
214{
215 struct mmc_omap_host *host = slot->host;
216 unsigned long flags;
217
218 if (claimed)
219 goto no_claim;
220 spin_lock_irqsave(&host->slot_lock, flags);
221 while (host->mmc != NULL) {
222 spin_unlock_irqrestore(&host->slot_lock, flags);
223 wait_event(host->slot_wq, host->mmc == NULL);
224 spin_lock_irqsave(&host->slot_lock, flags);
225 }
226 host->mmc = slot->mmc;
227 spin_unlock_irqrestore(&host->slot_lock, flags);
228no_claim:
0807a9b5
JL
229 del_timer(&host->clk_timer);
230 if (host->current_slot != slot || !claimed)
231 mmc_omap_fclk_offdelay(host->current_slot);
232
abfbe5f7 233 if (host->current_slot != slot) {
0807a9b5 234 OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
abfbe5f7
JY
235 if (host->pdata->switch_slot != NULL)
236 host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
237 host->current_slot = slot;
238 }
239
0807a9b5
JL
240 if (claimed) {
241 mmc_omap_fclk_enable(host, 1);
242
243 /* Doing the dummy read here seems to work around some bug
244 * at least in OMAP24xx silicon where the command would not
245 * start after writing the CMD register. Sigh. */
246 OMAP_MMC_READ(host, CON);
abfbe5f7 247
0807a9b5
JL
248 OMAP_MMC_WRITE(host, CON, slot->saved_con);
249 } else
250 mmc_omap_fclk_enable(host, 0);
abfbe5f7
JY
251}
252
253static void mmc_omap_start_request(struct mmc_omap_host *host,
254 struct mmc_request *req);
255
0f602ec7
JL
256static void mmc_omap_slot_release_work(struct work_struct *work)
257{
258 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
259 slot_release_work);
260 struct mmc_omap_slot *next_slot = host->next_slot;
261 struct mmc_request *rq;
262
263 host->next_slot = NULL;
264 mmc_omap_select_slot(next_slot, 1);
265
266 rq = next_slot->mrq;
267 next_slot->mrq = NULL;
268 mmc_omap_start_request(host, rq);
269}
270
0807a9b5 271static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
abfbe5f7
JY
272{
273 struct mmc_omap_host *host = slot->host;
274 unsigned long flags;
275 int i;
276
277 BUG_ON(slot == NULL || host->mmc == NULL);
0807a9b5
JL
278
279 if (clk_enabled)
280 /* Keeps clock running for at least 8 cycles on valid freq */
281 mod_timer(&host->clk_timer, jiffies + HZ/10);
282 else {
283 del_timer(&host->clk_timer);
284 mmc_omap_fclk_offdelay(slot);
285 mmc_omap_fclk_enable(host, 0);
286 }
abfbe5f7
JY
287
288 spin_lock_irqsave(&host->slot_lock, flags);
289 /* Check for any pending requests */
290 for (i = 0; i < host->nr_slots; i++) {
291 struct mmc_omap_slot *new_slot;
abfbe5f7
JY
292
293 if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
294 continue;
295
0f602ec7 296 BUG_ON(host->next_slot != NULL);
abfbe5f7
JY
297 new_slot = host->slots[i];
298 /* The current slot should not have a request in queue */
299 BUG_ON(new_slot == host->current_slot);
300
0f602ec7 301 host->next_slot = new_slot;
abfbe5f7
JY
302 host->mmc = new_slot->mmc;
303 spin_unlock_irqrestore(&host->slot_lock, flags);
b01a4f1c 304 queue_work(host->mmc_omap_wq, &host->slot_release_work);
abfbe5f7
JY
305 return;
306 }
307
308 host->mmc = NULL;
309 wake_up(&host->slot_wq);
310 spin_unlock_irqrestore(&host->slot_lock, flags);
311}
312
5a0f3f1f
JY
313static inline
314int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
315{
8348f002
KP
316 if (slot->pdata->get_cover_state)
317 return slot->pdata->get_cover_state(mmc_dev(slot->mmc),
318 slot->id);
319 return 0;
5a0f3f1f
JY
320}
321
322static ssize_t
323mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
324 char *buf)
325{
326 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
327 struct mmc_omap_slot *slot = mmc_priv(mmc);
328
329 return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? "open" :
330 "closed");
331}
332
333static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
334
abfbe5f7
JY
335static ssize_t
336mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
337 char *buf)
338{
339 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
340 struct mmc_omap_slot *slot = mmc_priv(mmc);
341
342 return sprintf(buf, "%s\n", slot->pdata->name);
343}
344
345static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
346
730c9b7e
CA
347static void
348mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
349{
350 u32 cmdreg;
351 u32 resptype;
352 u32 cmdtype;
353
354 host->cmd = cmd;
355
356 resptype = 0;
357 cmdtype = 0;
358
359 /* Our hardware needs to know exact type */
1b3b2631
CEA
360 switch (mmc_resp_type(cmd)) {
361 case MMC_RSP_NONE:
362 break;
363 case MMC_RSP_R1:
364 case MMC_RSP_R1B:
6f949909 365 /* resp 1, 1b, 6, 7 */
730c9b7e
CA
366 resptype = 1;
367 break;
1b3b2631 368 case MMC_RSP_R2:
730c9b7e
CA
369 resptype = 2;
370 break;
1b3b2631 371 case MMC_RSP_R3:
730c9b7e
CA
372 resptype = 3;
373 break;
374 default:
1b3b2631 375 dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
730c9b7e
CA
376 break;
377 }
378
379 if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
380 cmdtype = OMAP_MMC_CMDTYPE_ADTC;
381 } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
382 cmdtype = OMAP_MMC_CMDTYPE_BC;
383 } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
384 cmdtype = OMAP_MMC_CMDTYPE_BCR;
385 } else {
386 cmdtype = OMAP_MMC_CMDTYPE_AC;
387 }
388
389 cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
390
abfbe5f7 391 if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
730c9b7e
CA
392 cmdreg |= 1 << 6;
393
394 if (cmd->flags & MMC_RSP_BUSY)
395 cmdreg |= 1 << 11;
396
397 if (host->data && !(host->data->flags & MMC_DATA_WRITE))
398 cmdreg |= 1 << 15;
399
0fb4723d 400 mod_timer(&host->cmd_abort_timer, jiffies + HZ/2);
eb1860bc 401
3342ee8b
JY
402 OMAP_MMC_WRITE(host, CTO, 200);
403 OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
404 OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
405 OMAP_MMC_WRITE(host, IE,
730c9b7e
CA
406 OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
407 OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
408 OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
409 OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
410 OMAP_MMC_STAT_END_OF_DATA);
3342ee8b 411 OMAP_MMC_WRITE(host, CMD, cmdreg);
730c9b7e
CA
412}
413
a914ded2
JY
414static void
415mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
416 int abort)
417{
418 enum dma_data_direction dma_data_dir;
3451c067
RK
419 struct device *dev = mmc_dev(host->mmc);
420 struct dma_chan *c;
a914ded2 421
3451c067 422#ifdef USE_DMA_PRIVATE
a914ded2
JY
423 BUG_ON(host->dma_ch < 0);
424 if (data->error)
425 omap_stop_dma(host->dma_ch);
426 /* Release DMA channel lazily */
427 mod_timer(&host->dma_timer, jiffies + HZ);
3451c067
RK
428#endif
429 if (data->flags & MMC_DATA_WRITE) {
a914ded2 430 dma_data_dir = DMA_TO_DEVICE;
3451c067
RK
431 c = host->dma_tx;
432 } else {
a914ded2 433 dma_data_dir = DMA_FROM_DEVICE;
3451c067
RK
434 c = host->dma_rx;
435 }
436 if (c) {
437 if (data->error) {
438 dmaengine_terminate_all(c);
439 /* Claim nothing transferred on error... */
440 data->bytes_xfered = 0;
441 }
442 dev = c->device->dev;
443 }
444 dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
a914ded2
JY
445}
446
0f602ec7
JL
447static void mmc_omap_send_stop_work(struct work_struct *work)
448{
449 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
450 send_stop_work);
451 struct mmc_omap_slot *slot = host->current_slot;
452 struct mmc_data *data = host->stop_data;
453 unsigned long tick_ns;
454
455 tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq;
456 ndelay(8*tick_ns);
457
458 mmc_omap_start_command(host, data->stop);
459}
460
730c9b7e
CA
461static void
462mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
463{
a914ded2
JY
464 if (host->dma_in_use)
465 mmc_omap_release_dma(host, data, data->error);
466
730c9b7e
CA
467 host->data = NULL;
468 host->sg_len = 0;
730c9b7e
CA
469
470 /* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
471 * dozens of requests until the card finishes writing data.
472 * It'd be cheaper to just wait till an EOFB interrupt arrives...
473 */
474
475 if (!data->stop) {
a914ded2
JY
476 struct mmc_host *mmc;
477
730c9b7e 478 host->mrq = NULL;
a914ded2 479 mmc = host->mmc;
0807a9b5 480 mmc_omap_release_slot(host->current_slot, 1);
a914ded2 481 mmc_request_done(mmc, data->mrq);
730c9b7e
CA
482 return;
483 }
484
0f602ec7 485 host->stop_data = data;
b01a4f1c 486 queue_work(host->mmc_omap_wq, &host->send_stop_work);
730c9b7e
CA
487}
488
eb1860bc 489static void
0fb4723d 490mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
eb1860bc
JL
491{
492 struct mmc_omap_slot *slot = host->current_slot;
493 unsigned int restarts, passes, timeout;
494 u16 stat = 0;
495
496 /* Sending abort takes 80 clocks. Have some extra and round up */
497 timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq;
498 restarts = 0;
0fb4723d 499 while (restarts < maxloops) {
eb1860bc
JL
500 OMAP_MMC_WRITE(host, STAT, 0xFFFF);
501 OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));
502
503 passes = 0;
504 while (passes < timeout) {
505 stat = OMAP_MMC_READ(host, STAT);
506 if (stat & OMAP_MMC_STAT_END_OF_CMD)
507 goto out;
508 udelay(1);
509 passes++;
510 }
511
512 restarts++;
513 }
514out:
515 OMAP_MMC_WRITE(host, STAT, stat);
516}
517
a914ded2
JY
518static void
519mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
520{
a914ded2
JY
521 if (host->dma_in_use)
522 mmc_omap_release_dma(host, data, 1);
523
524 host->data = NULL;
525 host->sg_len = 0;
526
0fb4723d 527 mmc_omap_send_abort(host, 10000);
a914ded2
JY
528}
529
730c9b7e
CA
530static void
531mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
532{
533 unsigned long flags;
534 int done;
535
536 if (!host->dma_in_use) {
537 mmc_omap_xfer_done(host, data);
538 return;
539 }
540 done = 0;
541 spin_lock_irqsave(&host->dma_lock, flags);
542 if (host->dma_done)
543 done = 1;
544 else
545 host->brs_received = 1;
546 spin_unlock_irqrestore(&host->dma_lock, flags);
547 if (done)
548 mmc_omap_xfer_done(host, data);
549}
550
#ifdef USE_DMA_PRIVATE
/*
 * Lazy-release timer: free the private DMA channel once it has sat
 * idle long enough (armed from mmc_omap_release_dma()).
 */
static void
mmc_omap_dma_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	BUG_ON(host->dma_ch < 0);
	omap_free_dma(host->dma_ch);
	host->dma_ch = -1;
}
#endif
730c9b7e
CA
562
563static void
564mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
565{
566 unsigned long flags;
567 int done;
568
569 done = 0;
570 spin_lock_irqsave(&host->dma_lock, flags);
571 if (host->brs_received)
572 done = 1;
573 else
574 host->dma_done = 1;
575 spin_unlock_irqrestore(&host->dma_lock, flags);
576 if (done)
577 mmc_omap_xfer_done(host, data);
578}
579
580static void
581mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
582{
583 host->cmd = NULL;
584
0fb4723d 585 del_timer(&host->cmd_abort_timer);
eb1860bc 586
730c9b7e
CA
587 if (cmd->flags & MMC_RSP_PRESENT) {
588 if (cmd->flags & MMC_RSP_136) {
589 /* response type 2 */
590 cmd->resp[3] =
3342ee8b
JY
591 OMAP_MMC_READ(host, RSP0) |
592 (OMAP_MMC_READ(host, RSP1) << 16);
730c9b7e 593 cmd->resp[2] =
3342ee8b
JY
594 OMAP_MMC_READ(host, RSP2) |
595 (OMAP_MMC_READ(host, RSP3) << 16);
730c9b7e 596 cmd->resp[1] =
3342ee8b
JY
597 OMAP_MMC_READ(host, RSP4) |
598 (OMAP_MMC_READ(host, RSP5) << 16);
730c9b7e 599 cmd->resp[0] =
3342ee8b
JY
600 OMAP_MMC_READ(host, RSP6) |
601 (OMAP_MMC_READ(host, RSP7) << 16);
730c9b7e
CA
602 } else {
603 /* response types 1, 1b, 3, 4, 5, 6 */
604 cmd->resp[0] =
3342ee8b
JY
605 OMAP_MMC_READ(host, RSP6) |
606 (OMAP_MMC_READ(host, RSP7) << 16);
730c9b7e
CA
607 }
608 }
609
17b0429d 610 if (host->data == NULL || cmd->error) {
a914ded2
JY
611 struct mmc_host *mmc;
612
613 if (host->data != NULL)
614 mmc_omap_abort_xfer(host, host->data);
730c9b7e 615 host->mrq = NULL;
a914ded2 616 mmc = host->mmc;
0807a9b5 617 mmc_omap_release_slot(host->current_slot, 1);
a914ded2 618 mmc_request_done(mmc, cmd->mrq);
730c9b7e
CA
619 }
620}
621
eb1860bc
JL
622/*
623 * Abort stuck command. Can occur when card is removed while it is being
624 * read.
625 */
626static void mmc_omap_abort_command(struct work_struct *work)
627{
628 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
0fb4723d
JL
629 cmd_abort_work);
630 BUG_ON(!host->cmd);
eb1860bc
JL
631
632 dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
633 host->cmd->opcode);
634
0fb4723d
JL
635 if (host->cmd->error == 0)
636 host->cmd->error = -ETIMEDOUT;
eb1860bc 637
0fb4723d
JL
638 if (host->data == NULL) {
639 struct mmc_command *cmd;
640 struct mmc_host *mmc;
641
642 cmd = host->cmd;
643 host->cmd = NULL;
644 mmc_omap_send_abort(host, 10000);
645
646 host->mrq = NULL;
647 mmc = host->mmc;
0807a9b5 648 mmc_omap_release_slot(host->current_slot, 1);
0fb4723d
JL
649 mmc_request_done(mmc, cmd->mrq);
650 } else
651 mmc_omap_cmd_done(host, host->cmd);
eb1860bc 652
0fb4723d
JL
653 host->abort = 0;
654 enable_irq(host->irq);
eb1860bc
JL
655}
656
657static void
658mmc_omap_cmd_timer(unsigned long data)
659{
660 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
0fb4723d 661 unsigned long flags;
eb1860bc 662
0fb4723d
JL
663 spin_lock_irqsave(&host->slot_lock, flags);
664 if (host->cmd != NULL && !host->abort) {
665 OMAP_MMC_WRITE(host, IE, 0);
666 disable_irq(host->irq);
667 host->abort = 1;
b01a4f1c 668 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
0fb4723d
JL
669 }
670 spin_unlock_irqrestore(&host->slot_lock, flags);
eb1860bc
JL
671}
672
730c9b7e
CA
673/* PIO only */
674static void
675mmc_omap_sg_to_buf(struct mmc_omap_host *host)
676{
677 struct scatterlist *sg;
678
679 sg = host->data->sg + host->sg_idx;
680 host->buffer_bytes_left = sg->length;
45711f1a 681 host->buffer = sg_virt(sg);
730c9b7e
CA
682 if (host->buffer_bytes_left > host->total_bytes_left)
683 host->buffer_bytes_left = host->total_bytes_left;
684}
685
0807a9b5
JL
/* clk_timer handler: gate the functional clock off after idle grace */
static void
mmc_omap_clk_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	mmc_omap_fclk_enable(host, 0);
}
693
730c9b7e
CA
694/* PIO only */
695static void
696mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
697{
698 int n;
730c9b7e
CA
699
700 if (host->buffer_bytes_left == 0) {
701 host->sg_idx++;
702 BUG_ON(host->sg_idx == host->sg_len);
703 mmc_omap_sg_to_buf(host);
704 }
705 n = 64;
706 if (n > host->buffer_bytes_left)
707 n = host->buffer_bytes_left;
708 host->buffer_bytes_left -= n;
709 host->total_bytes_left -= n;
710 host->data->bytes_xfered += n;
711
712 if (write) {
0e950fa6 713 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
730c9b7e 714 } else {
0e950fa6 715 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
730c9b7e
CA
716 }
717}
718
719static inline void mmc_omap_report_irq(u16 status)
720{
721 static const char *mmc_omap_status_bits[] = {
722 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
723 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
724 };
725 int i, c = 0;
726
727 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
728 if (status & (1 << i)) {
729 if (c)
730 printk(" ");
731 printk("%s", mmc_omap_status_bits[i]);
732 c++;
733 }
734}
735
7d12e780 736static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
730c9b7e
CA
737{
738 struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
739 u16 status;
740 int end_command;
741 int end_transfer;
2a50b888 742 int transfer_error, cmd_error;
730c9b7e
CA
743
744 if (host->cmd == NULL && host->data == NULL) {
3342ee8b 745 status = OMAP_MMC_READ(host, STAT);
2a50b888
JY
746 dev_info(mmc_dev(host->slots[0]->mmc),
747 "Spurious IRQ 0x%04x\n", status);
730c9b7e 748 if (status != 0) {
3342ee8b
JY
749 OMAP_MMC_WRITE(host, STAT, status);
750 OMAP_MMC_WRITE(host, IE, 0);
730c9b7e
CA
751 }
752 return IRQ_HANDLED;
753 }
754
755 end_command = 0;
756 end_transfer = 0;
757 transfer_error = 0;
2a50b888 758 cmd_error = 0;
730c9b7e 759
3342ee8b 760 while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
2a50b888
JY
761 int cmd;
762
3342ee8b 763 OMAP_MMC_WRITE(host, STAT, status);
2a50b888
JY
764 if (host->cmd != NULL)
765 cmd = host->cmd->opcode;
766 else
767 cmd = -1;
730c9b7e
CA
768#ifdef CONFIG_MMC_DEBUG
769 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
2a50b888 770 status, cmd);
730c9b7e
CA
771 mmc_omap_report_irq(status);
772 printk("\n");
773#endif
774 if (host->total_bytes_left) {
775 if ((status & OMAP_MMC_STAT_A_FULL) ||
776 (status & OMAP_MMC_STAT_END_OF_DATA))
777 mmc_omap_xfer_data(host, 0);
778 if (status & OMAP_MMC_STAT_A_EMPTY)
779 mmc_omap_xfer_data(host, 1);
780 }
781
2a50b888 782 if (status & OMAP_MMC_STAT_END_OF_DATA)
730c9b7e 783 end_transfer = 1;
730c9b7e
CA
784
785 if (status & OMAP_MMC_STAT_DATA_TOUT) {
2a50b888
JY
786 dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
787 cmd);
730c9b7e 788 if (host->data) {
17b0429d 789 host->data->error = -ETIMEDOUT;
730c9b7e
CA
790 transfer_error = 1;
791 }
792 }
793
794 if (status & OMAP_MMC_STAT_DATA_CRC) {
795 if (host->data) {
17b0429d 796 host->data->error = -EILSEQ;
730c9b7e
CA
797 dev_dbg(mmc_dev(host->mmc),
798 "data CRC error, bytes left %d\n",
799 host->total_bytes_left);
800 transfer_error = 1;
801 } else {
802 dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
803 }
804 }
805
806 if (status & OMAP_MMC_STAT_CMD_TOUT) {
807 /* Timeouts are routine with some commands */
808 if (host->cmd) {
abfbe5f7
JY
809 struct mmc_omap_slot *slot =
810 host->current_slot;
2a50b888
JY
811 if (slot == NULL ||
812 !mmc_omap_cover_is_open(slot))
5a0f3f1f 813 dev_err(mmc_dev(host->mmc),
2a50b888
JY
814 "command timeout (CMD%d)\n",
815 cmd);
17b0429d 816 host->cmd->error = -ETIMEDOUT;
730c9b7e 817 end_command = 1;
2a50b888 818 cmd_error = 1;
730c9b7e
CA
819 }
820 }
821
822 if (status & OMAP_MMC_STAT_CMD_CRC) {
823 if (host->cmd) {
824 dev_err(mmc_dev(host->mmc),
825 "command CRC error (CMD%d, arg 0x%08x)\n",
2a50b888 826 cmd, host->cmd->arg);
17b0429d 827 host->cmd->error = -EILSEQ;
730c9b7e 828 end_command = 1;
2a50b888 829 cmd_error = 1;
730c9b7e
CA
830 } else
831 dev_err(mmc_dev(host->mmc),
832 "command CRC error without cmd?\n");
833 }
834
835 if (status & OMAP_MMC_STAT_CARD_ERR) {
0107a4b3
RM
836 dev_dbg(mmc_dev(host->mmc),
837 "ignoring card status error (CMD%d)\n",
2a50b888 838 cmd);
0107a4b3 839 end_command = 1;
730c9b7e
CA
840 }
841
842 /*
843 * NOTE: On 1610 the END_OF_CMD may come too early when
2a50b888 844 * starting a write
730c9b7e
CA
845 */
846 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
847 (!(status & OMAP_MMC_STAT_A_EMPTY))) {
848 end_command = 1;
849 }
850 }
851
0fb4723d
JL
852 if (cmd_error && host->data) {
853 del_timer(&host->cmd_abort_timer);
854 host->abort = 1;
855 OMAP_MMC_WRITE(host, IE, 0);
e749c6f2 856 disable_irq_nosync(host->irq);
b01a4f1c 857 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
0fb4723d
JL
858 return IRQ_HANDLED;
859 }
860
f6947514 861 if (end_command && host->cmd)
730c9b7e 862 mmc_omap_cmd_done(host, host->cmd);
2a50b888
JY
863 if (host->data != NULL) {
864 if (transfer_error)
865 mmc_omap_xfer_done(host, host->data);
866 else if (end_transfer)
867 mmc_omap_end_of_data(host, host->data);
730c9b7e 868 }
730c9b7e
CA
869
870 return IRQ_HANDLED;
871}
872
7584d276 873void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
5a0f3f1f 874{
7584d276 875 int cover_open;
5a0f3f1f 876 struct mmc_omap_host *host = dev_get_drvdata(dev);
7584d276 877 struct mmc_omap_slot *slot = host->slots[num];
5a0f3f1f 878
7584d276 879 BUG_ON(num >= host->nr_slots);
5a0f3f1f
JY
880
881 /* Other subsystems can call in here before we're initialised. */
7584d276 882 if (host->nr_slots == 0 || !host->slots[num])
5a0f3f1f
JY
883 return;
884
7584d276
JL
885 cover_open = mmc_omap_cover_is_open(slot);
886 if (cover_open != slot->cover_open) {
887 slot->cover_open = cover_open;
888 sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
889 }
890
891 tasklet_hi_schedule(&slot->cover_tasklet);
5a0f3f1f
JY
892}
893
7584d276 894static void mmc_omap_cover_timer(unsigned long arg)
5a0f3f1f
JY
895{
896 struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
7584d276 897 tasklet_schedule(&slot->cover_tasklet);
5a0f3f1f
JY
898}
899
7584d276 900static void mmc_omap_cover_handler(unsigned long param)
5a0f3f1f 901{
7584d276
JL
902 struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param;
903 int cover_open = mmc_omap_cover_is_open(slot);
5a0f3f1f 904
7584d276
JL
905 mmc_detect_change(slot->mmc, 0);
906 if (!cover_open)
907 return;
908
909 /*
910 * If no card is inserted, we postpone polling until
911 * the cover has been closed.
912 */
913 if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
914 return;
915
916 mod_timer(&slot->cover_timer,
917 jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
5a0f3f1f
JY
918}
919
3451c067
RK
920static void mmc_omap_dma_callback(void *priv)
921{
922 struct mmc_omap_host *host = priv;
923 struct mmc_data *data = host->data;
924
925 /* If we got to the end of DMA, assume everything went well */
926 data->bytes_xfered += data->blocks * data->blksz;
927
928 mmc_omap_dma_done(host, data);
929}
930
931#ifdef USE_DMA_PRIVATE
730c9b7e
CA
932/* Prepare to transfer the next segment of a scatterlist */
933static void
934mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
935{
936 int dma_ch = host->dma_ch;
937 unsigned long data_addr;
938 u16 buf, frame;
939 u32 count;
940 struct scatterlist *sg = &data->sg[host->sg_idx];
941 int src_port = 0;
942 int dst_port = 0;
943 int sync_dev = 0;
944
0e950fa6 945 data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
a3fd4a1b 946 frame = data->blksz;
730c9b7e
CA
947 count = sg_dma_len(sg);
948
a3fd4a1b 949 if ((data->blocks == 1) && (count > data->blksz))
730c9b7e
CA
950 count = frame;
951
952 host->dma_len = count;
953
954 /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
955 * Use 16 or 32 word frames when the blocksize is at least that large.
956 * Blocksize is usually 512 bytes; but not for some SD reads.
957 */
958 if (cpu_is_omap15xx() && frame > 32)
959 frame = 32;
960 else if (frame > 64)
961 frame = 64;
962 count /= frame;
963 frame >>= 1;
964
965 if (!(data->flags & MMC_DATA_WRITE)) {
966 buf = 0x800f | ((frame - 1) << 8);
967
968 if (cpu_class_is_omap1()) {
969 src_port = OMAP_DMA_PORT_TIPB;
970 dst_port = OMAP_DMA_PORT_EMIFF;
971 }
972 if (cpu_is_omap24xx())
973 sync_dev = OMAP24XX_DMA_MMC1_RX;
974
975 omap_set_dma_src_params(dma_ch, src_port,
976 OMAP_DMA_AMODE_CONSTANT,
977 data_addr, 0, 0);
978 omap_set_dma_dest_params(dma_ch, dst_port,
979 OMAP_DMA_AMODE_POST_INC,
980 sg_dma_address(sg), 0, 0);
981 omap_set_dma_dest_data_pack(dma_ch, 1);
982 omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
983 } else {
984 buf = 0x0f80 | ((frame - 1) << 0);
985
986 if (cpu_class_is_omap1()) {
987 src_port = OMAP_DMA_PORT_EMIFF;
988 dst_port = OMAP_DMA_PORT_TIPB;
989 }
990 if (cpu_is_omap24xx())
991 sync_dev = OMAP24XX_DMA_MMC1_TX;
992
993 omap_set_dma_dest_params(dma_ch, dst_port,
994 OMAP_DMA_AMODE_CONSTANT,
995 data_addr, 0, 0);
996 omap_set_dma_src_params(dma_ch, src_port,
997 OMAP_DMA_AMODE_POST_INC,
998 sg_dma_address(sg), 0, 0);
999 omap_set_dma_src_data_pack(dma_ch, 1);
1000 omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
1001 }
1002
1003 /* Max limit for DMA frame count is 0xffff */
d99c5909 1004 BUG_ON(count > 0xffff);
730c9b7e 1005
3342ee8b 1006 OMAP_MMC_WRITE(host, BUF, buf);
730c9b7e
CA
1007 omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
1008 frame, count, OMAP_DMA_SYNC_FRAME,
1009 sync_dev, 0);
1010}
1011
1012/* A scatterlist segment completed */
1013static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
1014{
1015 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
1016 struct mmc_data *mmcdat = host->data;
1017
1018 if (unlikely(host->dma_ch < 0)) {
ce9c1a83
TL
1019 dev_err(mmc_dev(host->mmc),
1020 "DMA callback while DMA not enabled\n");
730c9b7e
CA
1021 return;
1022 }
1023 /* FIXME: We really should do something to _handle_ the errors */
7ff879db 1024 if (ch_status & OMAP1_DMA_TOUT_IRQ) {
730c9b7e
CA
1025 dev_err(mmc_dev(host->mmc),"DMA timeout\n");
1026 return;
1027 }
1028 if (ch_status & OMAP_DMA_DROP_IRQ) {
1029 dev_err(mmc_dev(host->mmc), "DMA sync error\n");
1030 return;
1031 }
1032 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
1033 return;
1034 }
1035 mmcdat->bytes_xfered += host->dma_len;
1036 host->sg_idx++;
1037 if (host->sg_idx < host->sg_len) {
1038 mmc_omap_prepare_dma(host, host->data);
1039 omap_start_dma(host->dma_ch);
1040 } else
1041 mmc_omap_dma_done(host, host->data);
1042}
1043
1044static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
1045{
df48dd02 1046 const char *dma_dev_name;
730c9b7e
CA
1047 int sync_dev, dma_ch, is_read, r;
1048
1049 is_read = !(data->flags & MMC_DATA_WRITE);
1050 del_timer_sync(&host->dma_timer);
1051 if (host->dma_ch >= 0) {
1052 if (is_read == host->dma_is_read)
1053 return 0;
1054 omap_free_dma(host->dma_ch);
1055 host->dma_ch = -1;
1056 }
1057
1058 if (is_read) {
d8874665 1059 if (host->id == 0) {
730c9b7e 1060 sync_dev = OMAP_DMA_MMC_RX;
df48dd02 1061 dma_dev_name = "MMC1 read";
730c9b7e
CA
1062 } else {
1063 sync_dev = OMAP_DMA_MMC2_RX;
df48dd02 1064 dma_dev_name = "MMC2 read";
730c9b7e
CA
1065 }
1066 } else {
d8874665 1067 if (host->id == 0) {
730c9b7e 1068 sync_dev = OMAP_DMA_MMC_TX;
df48dd02 1069 dma_dev_name = "MMC1 write";
730c9b7e
CA
1070 } else {
1071 sync_dev = OMAP_DMA_MMC2_TX;
df48dd02 1072 dma_dev_name = "MMC2 write";
730c9b7e
CA
1073 }
1074 }
df48dd02 1075 r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
730c9b7e
CA
1076 host, &dma_ch);
1077 if (r != 0) {
1078 dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
1079 return r;
1080 }
1081 host->dma_ch = dma_ch;
1082 host->dma_is_read = is_read;
1083
1084 return 0;
1085}
3451c067 1086#endif
730c9b7e
CA
1087
1088static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
1089{
1090 u16 reg;
1091
3342ee8b 1092 reg = OMAP_MMC_READ(host, SDIO);
730c9b7e 1093 reg &= ~(1 << 5);
3342ee8b 1094 OMAP_MMC_WRITE(host, SDIO, reg);
730c9b7e 1095 /* Set maximum timeout */
3342ee8b 1096 OMAP_MMC_WRITE(host, CTO, 0xff);
730c9b7e
CA
1097}
1098
1099static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
1100{
b8f9f0e9 1101 unsigned int timeout, cycle_ns;
730c9b7e
CA
1102 u16 reg;
1103
b8f9f0e9
JY
1104 cycle_ns = 1000000000 / host->current_slot->fclk_freq;
1105 timeout = req->data->timeout_ns / cycle_ns;
1106 timeout += req->data->timeout_clks;
730c9b7e
CA
1107
1108 /* Check if we need to use timeout multiplier register */
3342ee8b 1109 reg = OMAP_MMC_READ(host, SDIO);
730c9b7e
CA
1110 if (timeout > 0xffff) {
1111 reg |= (1 << 5);
1112 timeout /= 1024;
1113 } else
1114 reg &= ~(1 << 5);
3342ee8b
JY
1115 OMAP_MMC_WRITE(host, SDIO, reg);
1116 OMAP_MMC_WRITE(host, DTO, timeout);
730c9b7e
CA
1117}
1118
1119static void
1120mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
1121{
1122 struct mmc_data *data = req->data;
1123 int i, use_dma, block_size;
1124 unsigned sg_len;
1125
1126 host->data = data;
1127 if (data == NULL) {
3342ee8b
JY
1128 OMAP_MMC_WRITE(host, BLEN, 0);
1129 OMAP_MMC_WRITE(host, NBLK, 0);
1130 OMAP_MMC_WRITE(host, BUF, 0);
730c9b7e
CA
1131 host->dma_in_use = 0;
1132 set_cmd_timeout(host, req);
1133 return;
1134 }
1135
a3fd4a1b 1136 block_size = data->blksz;
730c9b7e 1137
3342ee8b
JY
1138 OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
1139 OMAP_MMC_WRITE(host, BLEN, block_size - 1);
730c9b7e
CA
1140 set_data_timeout(host, req);
1141
1142 /* cope with calling layer confusion; it issues "single
1143 * block" writes using multi-block scatterlists.
1144 */
1145 sg_len = (data->blocks == 1) ? 1 : data->sg_len;
1146
1147 /* Only do DMA for entire blocks */
1148 use_dma = host->use_dma;
1149 if (use_dma) {
1150 for (i = 0; i < sg_len; i++) {
1151 if ((data->sg[i].length % block_size) != 0) {
1152 use_dma = 0;
1153 break;
1154 }
1155 }
1156 }
1157
1158 host->sg_idx = 0;
3451c067
RK
1159 if (use_dma) {
1160 enum dma_data_direction dma_data_dir;
1161 struct dma_async_tx_descriptor *tx;
1162 struct dma_chan *c;
1163 u32 burst, *bp;
1164 u16 buf;
1165
1166 /*
1167 * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
1168 * and 24xx. Use 16 or 32 word frames when the
1169 * blocksize is at least that large. Blocksize is
1170 * usually 512 bytes; but not for some SD reads.
1171 */
1172 burst = cpu_is_omap15xx() ? 32 : 64;
1173 if (burst > data->blksz)
1174 burst = data->blksz;
1175
1176 burst >>= 1;
1177
1178 if (data->flags & MMC_DATA_WRITE) {
1179 c = host->dma_tx;
1180 bp = &host->dma_tx_burst;
1181 buf = 0x0f80 | (burst - 1) << 0;
1182 dma_data_dir = DMA_TO_DEVICE;
1183 } else {
1184 c = host->dma_rx;
1185 bp = &host->dma_rx_burst;
1186 buf = 0x800f | (burst - 1) << 8;
1187 dma_data_dir = DMA_FROM_DEVICE;
1188 }
1189
1190 if (!c)
1191 goto use_pio;
1192
1193 /* Only reconfigure if we have a different burst size */
1194 if (*bp != burst) {
1195 struct dma_slave_config cfg;
1196
1197 cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
1198 cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
1199 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
1200 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
1201 cfg.src_maxburst = burst;
1202 cfg.dst_maxburst = burst;
1203
1204 if (dmaengine_slave_config(c, &cfg))
1205 goto use_pio;
1206
1207 *bp = burst;
1208 }
1209
1210 host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
1211 dma_data_dir);
1212 if (host->sg_len == 0)
1213 goto use_pio;
1214
1215 tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
1216 data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
1217 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1218 if (!tx)
1219 goto use_pio;
1220
1221 OMAP_MMC_WRITE(host, BUF, buf);
1222
1223 tx->callback = mmc_omap_dma_callback;
1224 tx->callback_param = host;
1225 dmaengine_submit(tx);
1226 host->brs_received = 0;
1227 host->dma_done = 0;
1228 host->dma_in_use = 1;
1229 return;
1230 }
1231 use_pio:
1232#ifdef USE_DMA_PRIVATE
730c9b7e
CA
1233 if (use_dma) {
1234 if (mmc_omap_get_dma_channel(host, data) == 0) {
1235 enum dma_data_direction dma_data_dir;
1236
1237 if (data->flags & MMC_DATA_WRITE)
1238 dma_data_dir = DMA_TO_DEVICE;
1239 else
1240 dma_data_dir = DMA_FROM_DEVICE;
1241
1242 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
1243 sg_len, dma_data_dir);
1244 host->total_bytes_left = 0;
1245 mmc_omap_prepare_dma(host, req->data);
1246 host->brs_received = 0;
1247 host->dma_done = 0;
1248 host->dma_in_use = 1;
1249 } else
1250 use_dma = 0;
1251 }
3451c067
RK
1252#else
1253 use_dma = 0;
1254#endif
730c9b7e
CA
1255
1256 /* Revert to PIO? */
1257 if (!use_dma) {
3342ee8b 1258 OMAP_MMC_WRITE(host, BUF, 0x1f1f);
730c9b7e
CA
1259 host->total_bytes_left = data->blocks * block_size;
1260 host->sg_len = sg_len;
1261 mmc_omap_sg_to_buf(host);
1262 host->dma_in_use = 0;
1263 }
1264}
1265
abfbe5f7
JY
1266static void mmc_omap_start_request(struct mmc_omap_host *host,
1267 struct mmc_request *req)
730c9b7e 1268{
abfbe5f7 1269 BUG_ON(host->mrq != NULL);
730c9b7e
CA
1270
1271 host->mrq = req;
1272
1273 /* only touch fifo AFTER the controller readies it */
1274 mmc_omap_prepare_data(host, req);
1275 mmc_omap_start_command(host, req->cmd);
3451c067
RK
1276 if (host->dma_in_use) {
1277 struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
1278 host->dma_tx : host->dma_rx;
1279
1280 if (c)
1281 dma_async_issue_pending(c);
1282#ifdef USE_DMA_PRIVATE
1283 else
1284 omap_start_dma(host->dma_ch);
1285#endif
1286 }
abfbe5f7
JY
1287}
1288
1289static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
1290{
1291 struct mmc_omap_slot *slot = mmc_priv(mmc);
1292 struct mmc_omap_host *host = slot->host;
1293 unsigned long flags;
1294
1295 spin_lock_irqsave(&host->slot_lock, flags);
1296 if (host->mmc != NULL) {
1297 BUG_ON(slot->mrq != NULL);
1298 slot->mrq = req;
1299 spin_unlock_irqrestore(&host->slot_lock, flags);
1300 return;
1301 } else
1302 host->mmc = mmc;
1303 spin_unlock_irqrestore(&host->slot_lock, flags);
1304 mmc_omap_select_slot(slot, 1);
1305 mmc_omap_start_request(host, req);
730c9b7e
CA
1306}
1307
65b5b6e5
JY
1308static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
1309 int vdd)
730c9b7e 1310{
65b5b6e5 1311 struct mmc_omap_host *host;
730c9b7e 1312
65b5b6e5
JY
1313 host = slot->host;
1314
1315 if (slot->pdata->set_power != NULL)
1316 slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
1317 vdd);
1318
1319 if (cpu_is_omap24xx()) {
1320 u16 w;
1321
1322 if (power_on) {
1323 w = OMAP_MMC_READ(host, CON);
1324 OMAP_MMC_WRITE(host, CON, w | (1 << 11));
1325 } else {
1326 w = OMAP_MMC_READ(host, CON);
1327 OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
1328 }
730c9b7e
CA
1329 }
1330}
1331
d3af5abe 1332static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
730c9b7e 1333{
abfbe5f7
JY
1334 struct mmc_omap_slot *slot = mmc_priv(mmc);
1335 struct mmc_omap_host *host = slot->host;
d3af5abe 1336 int func_clk_rate = clk_get_rate(host->fclk);
730c9b7e 1337 int dsor;
730c9b7e
CA
1338
1339 if (ios->clock == 0)
d3af5abe 1340 return 0;
730c9b7e 1341
d3af5abe
TL
1342 dsor = func_clk_rate / ios->clock;
1343 if (dsor < 1)
1344 dsor = 1;
730c9b7e 1345
d3af5abe 1346 if (func_clk_rate / dsor > ios->clock)
730c9b7e
CA
1347 dsor++;
1348
d3af5abe
TL
1349 if (dsor > 250)
1350 dsor = 250;
d3af5abe 1351
abfbe5f7
JY
1352 slot->fclk_freq = func_clk_rate / dsor;
1353
d3af5abe
TL
1354 if (ios->bus_width == MMC_BUS_WIDTH_4)
1355 dsor |= 1 << 15;
1356
1357 return dsor;
1358}
1359
1360static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1361{
abfbe5f7
JY
1362 struct mmc_omap_slot *slot = mmc_priv(mmc);
1363 struct mmc_omap_host *host = slot->host;
1364 int i, dsor;
0807a9b5 1365 int clk_enabled;
65b5b6e5
JY
1366
1367 mmc_omap_select_slot(slot, 0);
1368
0807a9b5
JL
1369 dsor = mmc_omap_calc_divisor(mmc, ios);
1370
65b5b6e5
JY
1371 if (ios->vdd != slot->vdd)
1372 slot->vdd = ios->vdd;
730c9b7e 1373
0807a9b5 1374 clk_enabled = 0;
730c9b7e
CA
1375 switch (ios->power_mode) {
1376 case MMC_POWER_OFF:
65b5b6e5 1377 mmc_omap_set_power(slot, 0, ios->vdd);
730c9b7e
CA
1378 break;
1379 case MMC_POWER_UP:
46a6730e 1380 /* Cannot touch dsor yet, just power up MMC */
65b5b6e5
JY
1381 mmc_omap_set_power(slot, 1, ios->vdd);
1382 goto exit;
46a6730e 1383 case MMC_POWER_ON:
0807a9b5
JL
1384 mmc_omap_fclk_enable(host, 1);
1385 clk_enabled = 1;
c5cb431d 1386 dsor |= 1 << 11;
730c9b7e
CA
1387 break;
1388 }
1389
65b5b6e5
JY
1390 if (slot->bus_mode != ios->bus_mode) {
1391 if (slot->pdata->set_bus_mode != NULL)
1392 slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id,
1393 ios->bus_mode);
1394 slot->bus_mode = ios->bus_mode;
1395 }
730c9b7e
CA
1396
1397 /* On insanely high arm_per frequencies something sometimes
1398 * goes somehow out of sync, and the POW bit is not being set,
1399 * which results in the while loop below getting stuck.
1400 * Writing to the CON register twice seems to do the trick. */
1401 for (i = 0; i < 2; i++)
3342ee8b 1402 OMAP_MMC_WRITE(host, CON, dsor);
65b5b6e5 1403 slot->saved_con = dsor;
46a6730e 1404 if (ios->power_mode == MMC_POWER_ON) {
9d7c6eee
JL
1405 /* worst case at 400kHz, 80 cycles makes 200 microsecs */
1406 int usecs = 250;
1407
730c9b7e 1408 /* Send clock cycles, poll completion */
3342ee8b
JY
1409 OMAP_MMC_WRITE(host, IE, 0);
1410 OMAP_MMC_WRITE(host, STAT, 0xffff);
c5cb431d 1411 OMAP_MMC_WRITE(host, CMD, 1 << 7);
9d7c6eee
JL
1412 while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) {
1413 udelay(1);
1414 usecs--;
1415 }
3342ee8b 1416 OMAP_MMC_WRITE(host, STAT, 1);
730c9b7e 1417 }
65b5b6e5
JY
1418
1419exit:
0807a9b5 1420 mmc_omap_release_slot(slot, clk_enabled);
730c9b7e
CA
1421}
1422
ab7aefd0 1423static const struct mmc_host_ops mmc_omap_ops = {
730c9b7e
CA
1424 .request = mmc_omap_request,
1425 .set_ios = mmc_omap_set_ios,
730c9b7e
CA
1426};
1427
4f837791 1428static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
730c9b7e 1429{
abfbe5f7 1430 struct mmc_omap_slot *slot = NULL;
730c9b7e 1431 struct mmc_host *mmc;
abfbe5f7
JY
1432 int r;
1433
1434 mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
1435 if (mmc == NULL)
1436 return -ENOMEM;
1437
1438 slot = mmc_priv(mmc);
1439 slot->host = host;
1440 slot->mmc = mmc;
1441 slot->id = id;
1442 slot->pdata = &host->pdata->slots[id];
1443
1444 host->slots[id] = slot;
1445
23af6039 1446 mmc->caps = 0;
90c62bf0 1447 if (host->pdata->slots[id].wires >= 4)
abfbe5f7
JY
1448 mmc->caps |= MMC_CAP_4_BIT_DATA;
1449
1450 mmc->ops = &mmc_omap_ops;
1451 mmc->f_min = 400000;
1452
1453 if (cpu_class_is_omap2())
1454 mmc->f_max = 48000000;
1455 else
1456 mmc->f_max = 24000000;
1457 if (host->pdata->max_freq)
1458 mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
1459 mmc->ocr_avail = slot->pdata->ocr_mask;
1460
1461 /* Use scatterlist DMA to reduce per-transfer costs.
1462 * NOTE max_seg_size assumption that small blocks aren't
1463 * normally used (except e.g. for reading SD registers).
1464 */
a36274e0 1465 mmc->max_segs = 32;
abfbe5f7
JY
1466 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
1467 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
1468 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1469 mmc->max_seg_size = mmc->max_req_size;
1470
1471 r = mmc_add_host(mmc);
1472 if (r < 0)
1473 goto err_remove_host;
1474
1475 if (slot->pdata->name != NULL) {
1476 r = device_create_file(&mmc->class_dev,
1477 &dev_attr_slot_name);
1478 if (r < 0)
1479 goto err_remove_host;
1480 }
1481
5a0f3f1f
JY
1482 if (slot->pdata->get_cover_state != NULL) {
1483 r = device_create_file(&mmc->class_dev,
1484 &dev_attr_cover_switch);
1485 if (r < 0)
1486 goto err_remove_slot_name;
1487
7584d276
JL
1488 setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
1489 (unsigned long)slot);
1490 tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
1491 (unsigned long)slot);
1492 tasklet_schedule(&slot->cover_tasklet);
5a0f3f1f
JY
1493 }
1494
abfbe5f7
JY
1495 return 0;
1496
5a0f3f1f
JY
1497err_remove_slot_name:
1498 if (slot->pdata->name != NULL)
1499 device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
abfbe5f7
JY
1500err_remove_host:
1501 mmc_remove_host(mmc);
1502 mmc_free_host(mmc);
1503 return r;
1504}
1505
1506static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
1507{
1508 struct mmc_host *mmc = slot->mmc;
1509
1510 if (slot->pdata->name != NULL)
1511 device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
5a0f3f1f
JY
1512 if (slot->pdata->get_cover_state != NULL)
1513 device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
1514
7584d276
JL
1515 tasklet_kill(&slot->cover_tasklet);
1516 del_timer_sync(&slot->cover_timer);
b01a4f1c 1517 flush_workqueue(slot->host->mmc_omap_wq);
abfbe5f7
JY
1518
1519 mmc_remove_host(mmc);
1520 mmc_free_host(mmc);
1521}
1522
b6e0703b 1523static int __devinit mmc_omap_probe(struct platform_device *pdev)
abfbe5f7
JY
1524{
1525 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
730c9b7e 1526 struct mmc_omap_host *host = NULL;
81ca7034 1527 struct resource *res;
3451c067
RK
1528 dma_cap_mask_t mask;
1529 unsigned sig;
abfbe5f7 1530 int i, ret = 0;
ce9c1a83 1531 int irq;
81ca7034 1532
abfbe5f7 1533 if (pdata == NULL) {
81ca7034
JY
1534 dev_err(&pdev->dev, "platform data missing\n");
1535 return -ENXIO;
1536 }
abfbe5f7
JY
1537 if (pdata->nr_slots == 0) {
1538 dev_err(&pdev->dev, "no slots\n");
1539 return -ENXIO;
1540 }
81ca7034
JY
1541
1542 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ce9c1a83 1543 irq = platform_get_irq(pdev, 0);
81ca7034 1544 if (res == NULL || irq < 0)
ce9c1a83 1545 return -ENXIO;
730c9b7e 1546
2092014d 1547 res = request_mem_region(res->start, resource_size(res),
abfbe5f7 1548 pdev->name);
81ca7034 1549 if (res == NULL)
730c9b7e 1550 return -EBUSY;
730c9b7e 1551
abfbe5f7
JY
1552 host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
1553 if (host == NULL) {
730c9b7e 1554 ret = -ENOMEM;
81ca7034 1555 goto err_free_mem_region;
730c9b7e
CA
1556 }
1557
0f602ec7
JL
1558 INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
1559 INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
1560
0fb4723d
JL
1561 INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
1562 setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
1563 (unsigned long) host);
eb1860bc 1564
0807a9b5
JL
1565 spin_lock_init(&host->clk_lock);
1566 setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
1567
730c9b7e 1568 spin_lock_init(&host->dma_lock);
3451c067 1569#ifdef USE_DMA_PRIVATE
01e77e13 1570 setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
3451c067 1571#endif
abfbe5f7
JY
1572 spin_lock_init(&host->slot_lock);
1573 init_waitqueue_head(&host->slot_wq);
1574
abfbe5f7
JY
1575 host->pdata = pdata;
1576 host->dev = &pdev->dev;
1577 platform_set_drvdata(pdev, host);
1578
730c9b7e 1579 host->id = pdev->id;
81ca7034 1580 host->mem_res = res;
ce9c1a83 1581 host->irq = irq;
730c9b7e 1582
abfbe5f7 1583 host->use_dma = 1;
3451c067 1584#ifdef USE_DMA_PRIVATE
d8874665 1585 host->dev->dma_mask = &pdata->dma_mask;
abfbe5f7 1586 host->dma_ch = -1;
3451c067 1587#endif
abfbe5f7
JY
1588
1589 host->irq = irq;
1590 host->phys_base = host->mem_res->start;
2092014d 1591 host->virt_base = ioremap(res->start, resource_size(res));
55c381e4
RK
1592 if (!host->virt_base)
1593 goto err_ioremap;
abfbe5f7 1594
d4a36645 1595 host->iclk = clk_get(&pdev->dev, "ick");
e799acb2
LM
1596 if (IS_ERR(host->iclk)) {
1597 ret = PTR_ERR(host->iclk);
d4a36645 1598 goto err_free_mmc_host;
e799acb2 1599 }
d4a36645 1600 clk_enable(host->iclk);
730c9b7e 1601
5c9e02b1 1602 host->fclk = clk_get(&pdev->dev, "fck");
730c9b7e
CA
1603 if (IS_ERR(host->fclk)) {
1604 ret = PTR_ERR(host->fclk);
81ca7034 1605 goto err_free_iclk;
730c9b7e
CA
1606 }
1607
3451c067
RK
1608 dma_cap_zero(mask);
1609 dma_cap_set(DMA_SLAVE, mask);
1610
1611 host->dma_tx_burst = -1;
1612 host->dma_rx_burst = -1;
1613
1614 if (cpu_is_omap24xx())
1615 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
1616 else
1617 sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
1618 host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1619#if 0
1620 if (!host->dma_tx) {
1621 dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
1622 sig);
1623 goto err_dma;
1624 }
1625#else
1626 if (!host->dma_tx)
1627 dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
1628 sig);
1629#endif
1630 if (cpu_is_omap24xx())
1631 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
1632 else
1633 sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
1634 host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1635#if 0
1636 if (!host->dma_rx) {
1637 dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
1638 sig);
1639 goto err_dma;
1640 }
1641#else
1642 if (!host->dma_rx)
1643 dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
1644 sig);
1645#endif
1646
abfbe5f7
JY
1647 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1648 if (ret)
3451c067 1649 goto err_free_dma;
42431acb 1650
abfbe5f7
JY
1651 if (pdata->init != NULL) {
1652 ret = pdata->init(&pdev->dev);
1653 if (ret < 0)
1654 goto err_free_irq;
1655 }
730c9b7e 1656
abfbe5f7 1657 host->nr_slots = pdata->nr_slots;
ebbe6f88 1658 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
3caf4140
TL
1659
1660 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1661 if (!host->mmc_omap_wq)
1662 goto err_plat_cleanup;
1663
abfbe5f7
JY
1664 for (i = 0; i < pdata->nr_slots; i++) {
1665 ret = mmc_omap_new_slot(host, i);
1666 if (ret < 0) {
1667 while (--i >= 0)
1668 mmc_omap_remove_slot(host->slots[i]);
730c9b7e 1669
3caf4140 1670 goto err_destroy_wq;
730c9b7e 1671 }
730c9b7e
CA
1672 }
1673
730c9b7e
CA
1674 return 0;
1675
3caf4140
TL
1676err_destroy_wq:
1677 destroy_workqueue(host->mmc_omap_wq);
abfbe5f7
JY
1678err_plat_cleanup:
1679 if (pdata->cleanup)
1680 pdata->cleanup(&pdev->dev);
1681err_free_irq:
1682 free_irq(host->irq, host);
3451c067
RK
1683err_free_dma:
1684 if (host->dma_tx)
1685 dma_release_channel(host->dma_tx);
1686 if (host->dma_rx)
1687 dma_release_channel(host->dma_rx);
81ca7034
JY
1688 clk_put(host->fclk);
1689err_free_iclk:
e799acb2
LM
1690 clk_disable(host->iclk);
1691 clk_put(host->iclk);
81ca7034 1692err_free_mmc_host:
55c381e4
RK
1693 iounmap(host->virt_base);
1694err_ioremap:
abfbe5f7 1695 kfree(host);
81ca7034 1696err_free_mem_region:
2092014d 1697 release_mem_region(res->start, resource_size(res));
730c9b7e
CA
1698 return ret;
1699}
1700
b6e0703b 1701static int __devexit mmc_omap_remove(struct platform_device *pdev)
730c9b7e
CA
1702{
1703 struct mmc_omap_host *host = platform_get_drvdata(pdev);
abfbe5f7 1704 int i;
730c9b7e
CA
1705
1706 platform_set_drvdata(pdev, NULL);
1707
81ca7034
JY
1708 BUG_ON(host == NULL);
1709
abfbe5f7
JY
1710 for (i = 0; i < host->nr_slots; i++)
1711 mmc_omap_remove_slot(host->slots[i]);
1712
1713 if (host->pdata->cleanup)
1714 host->pdata->cleanup(&pdev->dev);
81ca7034 1715
d4a36645 1716 mmc_omap_fclk_enable(host, 0);
49c1d9da 1717 free_irq(host->irq, host);
d4a36645
RK
1718 clk_put(host->fclk);
1719 clk_disable(host->iclk);
1720 clk_put(host->iclk);
730c9b7e 1721
3451c067
RK
1722 if (host->dma_tx)
1723 dma_release_channel(host->dma_tx);
1724 if (host->dma_rx)
1725 dma_release_channel(host->dma_rx);
1726
55c381e4 1727 iounmap(host->virt_base);
730c9b7e 1728 release_mem_region(pdev->resource[0].start,
81ca7034 1729 pdev->resource[0].end - pdev->resource[0].start + 1);
b01a4f1c 1730 destroy_workqueue(host->mmc_omap_wq);
81ca7034 1731
abfbe5f7 1732 kfree(host);
730c9b7e
CA
1733
1734 return 0;
1735}
1736
1737#ifdef CONFIG_PM
1738static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1739{
abfbe5f7 1740 int i, ret = 0;
730c9b7e
CA
1741 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1742
abfbe5f7 1743 if (host == NULL || host->suspended)
730c9b7e
CA
1744 return 0;
1745
abfbe5f7
JY
1746 for (i = 0; i < host->nr_slots; i++) {
1747 struct mmc_omap_slot *slot;
1748
1749 slot = host->slots[i];
1a13f8fa 1750 ret = mmc_suspend_host(slot->mmc);
abfbe5f7
JY
1751 if (ret < 0) {
1752 while (--i >= 0) {
1753 slot = host->slots[i];
1754 mmc_resume_host(slot->mmc);
1755 }
1756 return ret;
1757 }
730c9b7e 1758 }
abfbe5f7
JY
1759 host->suspended = 1;
1760 return 0;
730c9b7e
CA
1761}
1762
1763static int mmc_omap_resume(struct platform_device *pdev)
1764{
abfbe5f7 1765 int i, ret = 0;
730c9b7e
CA
1766 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1767
abfbe5f7 1768 if (host == NULL || !host->suspended)
730c9b7e
CA
1769 return 0;
1770
abfbe5f7
JY
1771 for (i = 0; i < host->nr_slots; i++) {
1772 struct mmc_omap_slot *slot;
1773 slot = host->slots[i];
1774 ret = mmc_resume_host(slot->mmc);
1775 if (ret < 0)
1776 return ret;
730c9b7e 1777
abfbe5f7
JY
1778 host->suspended = 0;
1779 }
1780 return 0;
730c9b7e
CA
1781}
1782#else
1783#define mmc_omap_suspend NULL
1784#define mmc_omap_resume NULL
1785#endif
1786
1787static struct platform_driver mmc_omap_driver = {
b6e0703b
V
1788 .probe = mmc_omap_probe,
1789 .remove = __devexit_p(mmc_omap_remove),
730c9b7e
CA
1790 .suspend = mmc_omap_suspend,
1791 .resume = mmc_omap_resume,
1792 .driver = {
1793 .name = DRIVER_NAME,
bc65c724 1794 .owner = THIS_MODULE,
730c9b7e
CA
1795 },
1796};
1797
680f1b5b 1798module_platform_driver(mmc_omap_driver);
730c9b7e
CA
1799MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1800MODULE_LICENSE("GPL");
bc65c724 1801MODULE_ALIAS("platform:" DRIVER_NAME);
d36b6910 1802MODULE_AUTHOR("Juha Yrjölä");