]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/mmc/host/at91_mci.c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland...
[mirror_ubuntu-artful-kernel.git] / drivers / mmc / host / at91_mci.c
1 /*
2 * linux/drivers/mmc/at91_mci.c - ATMEL AT91 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 /*
14 This is the AT91 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request, commands, reads and writes.
31
32 Commands are straight forward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to indicate
34 the command is finished, the response to the command are read and the mmc_request_done
35 function called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are read from the scatterlist
47 into a dma memory buffer (this is in case the source buffer should be read only). The
48 entire write buffer is then done from this single dma memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54 */
55
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/init.h>
59 #include <linux/ioport.h>
60 #include <linux/platform_device.h>
61 #include <linux/interrupt.h>
62 #include <linux/blkdev.h>
63 #include <linux/delay.h>
64 #include <linux/err.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/clk.h>
67 #include <linux/atmel_pdc.h>
68
69 #include <linux/mmc/host.h>
70
71 #include <asm/io.h>
72 #include <asm/irq.h>
73 #include <asm/mach/mmc.h>
74 #include <asm/arch/board.h>
75 #include <asm/arch/cpu.h>
76 #include <asm/arch/gpio.h>
77 #include <asm/arch/at91_mci.h>
78
79 #define DRIVER_NAME "at91_mci"
80
81 #undef SUPPORT_4WIRE
82
83 #define FL_SENT_COMMAND (1 << 0)
84 #define FL_SENT_STOP (1 << 1)
85
86 #define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
87 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
88 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
89
90 #define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
91 #define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
92
93
94 /*
95 * Low level type for this driver
96 */
/*
 * Low level type for this driver
 *
 * Per-controller state; one instance lives in the private area of the
 * struct mmc_host allocated in at91_mci_probe().
 */
struct at91mci_host
{
	struct mmc_host *mmc;		/* owning MMC core host */
	struct mmc_command *cmd;	/* command currently in flight (NULL if idle) */
	struct mmc_request *request;	/* request being processed by at91mci_process_next() */

	void __iomem *baseaddr;		/* ioremap()ed MCI register base */
	int irq;			/* MCI interrupt line from platform_get_irq() */

	struct at91_mmc_data *board;	/* board wiring: slot, wp/det/vcc pins, wire4 */
	int present;			/* card present flag; -1 when no detect pin */

	struct clk *mci_clk;		/* peripheral clock for the controller */

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int* buffer;		/* bounce buffer for writes (dma_alloc_coherent) */
	dma_addr_t physical_address;	/* bus address of the bounce buffer */
	unsigned int total_length;	/* bytes queued in the bounce buffer */

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};
130
131 /*
132 * Copy from sg to a dma block - used for transfers
133 */
134 static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
135 {
136 unsigned int len, i, size;
137 unsigned *dmabuf = host->buffer;
138
139 size = host->total_length;
140 len = data->sg_len;
141
142 /*
143 * Just loop through all entries. Size might not
144 * be the entire list though so make sure that
145 * we do not transfer too much.
146 */
147 for (i = 0; i < len; i++) {
148 struct scatterlist *sg;
149 int amount;
150 unsigned int *sgbuffer;
151
152 sg = &data->sg[i];
153
154 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
155 amount = min(size, sg->length);
156 size -= amount;
157
158 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
159 int index;
160
161 for (index = 0; index < (amount / 4); index++)
162 *dmabuf++ = swab32(sgbuffer[index]);
163 }
164 else
165 memcpy(dmabuf, sgbuffer, amount);
166
167 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
168
169 if (size == 0)
170 break;
171 }
172
173 /*
174 * Check that we didn't get a request to transfer
175 * more data than can fit into the SG list.
176 */
177 BUG_ON(size != 0);
178 }
179
180 /*
181 * Prepare a dma read
182 */
/*
 * Prepare a dma read
 *
 * Arms the PDC with up to two scatterlist entries: the "current" RPR/RCR
 * pair and the "next" RNPR/RNCR pair, so the controller can chain into the
 * next buffer without software intervention. Called both to start a read
 * and from at91mci_post_dma_read() to keep the pipeline full.
 */
static void at91mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* i == 0 targets the current PDC slot, i == 1 the chained "next" slot */
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			/* non-zero counter means the slot is still busy - leave it */
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		/* NOTE(review): dma_map_page() is called with a NULL device -
		 * works on this platform but relies on arch behaviour; confirm */
		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		/* PDC counters are in 32-bit words, hence length / 4 */
		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
247
248 /*
249 * Handle after a dma read
250 */
/*
 * Handle after a dma read
 *
 * Called on ENDRX: unmaps every scatterlist entry that the PDC has
 * finished with, applies the AT91RM9200 byte-swap erratum fix in place,
 * accounts the bytes, then either re-arms the PDC for the remaining
 * entries or - when the list is exhausted - waits for RXBUFF and shuts
 * the PDC down.
 */
static void at91mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* everything in [in_use_index, transfer_index) has been DMA'd into */
	while (host->in_use_index < host->transfer_index) {
		unsigned int *buffer;

		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		/* Swap the contents of the buffer */
		buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			/* undo the controller's byte swapping, word by word */
			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);
		}

		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		/* page was written behind the cache's back by DMA */
		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
	}

	pr_debug("post dma read done\n");
}
310
311 /*
312 * Handle transmitted data
313 */
314 static void at91_mci_handle_transmitted(struct at91mci_host *host)
315 {
316 struct mmc_command *cmd;
317 struct mmc_data *data;
318
319 pr_debug("Handling the transmit\n");
320
321 /* Disable the transfer */
322 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
323
324 /* Now wait for cmd ready */
325 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
326 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
327
328 cmd = host->cmd;
329 if (!cmd) return;
330
331 data = cmd->data;
332 if (!data) return;
333
334 data->bytes_xfered = host->total_length;
335 }
336
337 /*
338 * Enable the controller
339 */
/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	/* Turn the MCI state machine on */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	/* Mask all interrupt sources; transfers enable what they need */
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	/* Data timeout: 1M-cycle multiplier */
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	/* NOTE(review): 0x34a is a magic MR value - verify against the
	 * AT91 MCI_MR field layout (clock divider/power-save bits) */
	at91_mci_write(host, AT91_MCI_MR, AT91_MCI_PDCMODE | 0x34a);

	/* use Slot A or B (only one at same time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}
350
351 /*
352 * Disable the controller
353 */
/*
 * Disable the controller
 *
 * Disables the MCI and issues a software reset in the same write, putting
 * the block back into its power-on state.
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
358
359 /*
360 * Send a command
361 * return the interrupts to enable
362 */
/*
 * Send a command
 * return the interrupts to enable
 *
 * Builds the CMDR word from the mmc_command, programs block length and
 * PDC mode for data transfers, points the PDC at the scatterlist (reads)
 * or the bounce buffer (writes), writes ARGR/CMDR, and finally enables
 * the PDC. The caller ORs the returned mask with the error bits and
 * enables it in IER.
 */
static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Not sure if this is needed */
#if 0
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}
#endif
	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		/* no data phase: park the PDC and clear all its pointers/counters */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);

		at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
		at91_mci_write(host, AT91_MCI_CMDR, cmdr);
		/* only the command-ready interrupt is of interest */
		return AT91_MCI_CMDRDY;
	}

	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;	/* zero block length and PDC mode */
	at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

	/*
	 * Disable the PDC controller
	 */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	if (cmdr & AT91_MCI_TRCMD_START) {
		data->bytes_xfered = 0;
		host->transfer_index = 0;
		host->in_use_index = 0;
		if (cmdr & AT91_MCI_TRDIR) {
			/*
			 * Handle a read
			 */
			host->buffer = NULL;
			host->total_length = 0;

			at91mci_pre_dma_read(host);
			ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
		}
		else {
			/*
			 * Handle a write
			 */
			host->total_length = block_length * blocks;
			/* NOTE(review): allocation result is not checked; a
			 * failed dma_alloc_coherent would oops in
			 * at91mci_sg_to_dma - needs an error path */
			host->buffer = dma_alloc_coherent(NULL,
						  host->total_length,
						  &host->physical_address, GFP_KERNEL);

			at91mci_sg_to_dma(host, data);

			pr_debug("Transmitting %d bytes\n", host->total_length);

			at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
			at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
			ier = AT91_MCI_TXBUFE;
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
		else
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}
	return ier;
}
505
506 /*
507 * Wait for a command to complete
508 */
509 static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
510 {
511 unsigned int ier;
512
513 ier = at91_mci_send_command(host, cmd);
514
515 pr_debug("setting ier to %08X\n", ier);
516
517 /* Stop on errors or the required value */
518 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
519 }
520
521 /*
522 * Process the next step in the request
523 */
524 static void at91mci_process_next(struct at91mci_host *host)
525 {
526 if (!(host->flags & FL_SENT_COMMAND)) {
527 host->flags |= FL_SENT_COMMAND;
528 at91mci_process_command(host, host->request->cmd);
529 }
530 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
531 host->flags |= FL_SENT_STOP;
532 at91mci_process_command(host, host->request->stop);
533 }
534 else
535 mmc_request_done(host->mmc, host->request);
536 }
537
538 /*
539 * Handle a command that has been completed
540 */
541 static void at91mci_completed_command(struct at91mci_host *host)
542 {
543 struct mmc_command *cmd = host->cmd;
544 unsigned int status;
545
546 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
547
548 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
549 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
550 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
551 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
552
553 if (host->buffer) {
554 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
555 host->buffer = NULL;
556 }
557
558 status = at91_mci_read(host, AT91_MCI_SR);
559
560 pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
561 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
562
563 if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
564 AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
565 AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
566 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
567 cmd->error = MMC_ERR_NONE;
568 }
569 else {
570 if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
571 cmd->error = MMC_ERR_TIMEOUT;
572 else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
573 cmd->error = MMC_ERR_BADCRC;
574 else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
575 cmd->error = MMC_ERR_FIFO;
576 else
577 cmd->error = MMC_ERR_FAILED;
578
579 pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
580 cmd->error, cmd->opcode, cmd->retries);
581 }
582 }
583 else
584 cmd->error = MMC_ERR_NONE;
585
586 at91mci_process_next(host);
587 }
588
589 /*
590 * Handle an MMC request
591 */
592 static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
593 {
594 struct at91mci_host *host = mmc_priv(mmc);
595 host->request = mrq;
596 host->flags = 0;
597
598 at91mci_process_next(host);
599 }
600
601 /*
602 * Set the IOS
603 */
604 static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
605 {
606 int clkdiv;
607 struct at91mci_host *host = mmc_priv(mmc);
608 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
609
610 host->bus_mode = ios->bus_mode;
611
612 if (ios->clock == 0) {
613 /* Disable the MCI controller */
614 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
615 clkdiv = 0;
616 }
617 else {
618 /* Enable the MCI controller */
619 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
620
621 if ((at91_master_clock % (ios->clock * 2)) == 0)
622 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
623 else
624 clkdiv = (at91_master_clock / ios->clock) / 2;
625
626 pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
627 at91_master_clock / (2 * (clkdiv + 1)));
628 }
629 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
630 pr_debug("MMC: Setting controller bus width to 4\n");
631 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
632 }
633 else {
634 pr_debug("MMC: Setting controller bus width to 1\n");
635 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
636 }
637
638 /* Set the clock divider */
639 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
640
641 /* maybe switch power to the card */
642 if (host->board->vcc_pin) {
643 switch (ios->power_mode) {
644 case MMC_POWER_OFF:
645 at91_set_gpio_value(host->board->vcc_pin, 0);
646 break;
647 case MMC_POWER_UP:
648 case MMC_POWER_ON:
649 at91_set_gpio_value(host->board->vcc_pin, 1);
650 break;
651 }
652 }
653 }
654
655 /*
656 * Handle an interrupt
657 */
/*
 * Handle an interrupt
 *
 * Reads the raw status, masks it with the currently enabled interrupts,
 * logs error causes, and advances the transfer state machine: TXBUFE and
 * ENDRX drive the data phases, NOTBUSY/RXBUFF arm CMDRDY, and CMDRDY (or
 * any error) completes the command.
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	/* only react to sources we actually enabled */
	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		/* any error terminates the command immediately */
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			/* receive done - now wait for command-ready */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("Receive has ended\n");
			at91mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			/* write completed on the card - wait for command-ready */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE)
			pr_debug("Block transfer has ended\n");

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = 1;
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		/* mask everything; the next command re-enables what it needs */
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91mci_completed_command(host);
	} else
		/* acknowledge by masking the sources we just handled */
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}
746
747 static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
748 {
749 struct at91mci_host *host = _host;
750 int present = !at91_get_gpio_value(irq);
751
752 /*
753 * we expect this irq on both insert and remove,
754 * and use a short delay to debounce.
755 */
756 if (present != host->present) {
757 host->present = present;
758 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
759 present ? "insert" : "remove");
760 if (!present) {
761 pr_debug("****** Resetting SD-card bus width ******\n");
762 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
763 }
764 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
765 }
766 return IRQ_HANDLED;
767 }
768
769 static int at91_mci_get_ro(struct mmc_host *mmc)
770 {
771 int read_only = 0;
772 struct at91mci_host *host = mmc_priv(mmc);
773
774 if (host->board->wp_pin) {
775 read_only = at91_get_gpio_value(host->board->wp_pin);
776 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
777 (read_only ? "read-only" : "read-write") );
778 }
779 else {
780 printk(KERN_WARNING "%s: host does not support reading read-only "
781 "switch. Assuming write-enable.\n", mmc_hostname(mmc));
782 }
783 return read_only;
784 }
785
/* Operations exposed to the MMC core for this host controller */
static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};
791
792 /*
793 * Probe for the device
794 */
795 static int __init at91_mci_probe(struct platform_device *pdev)
796 {
797 struct mmc_host *mmc;
798 struct at91mci_host *host;
799 struct resource *res;
800 int ret;
801
802 pr_debug("Probe MCI devices\n");
803
804 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
805 if (!res)
806 return -ENXIO;
807
808 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
809 return -EBUSY;
810
811 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
812 if (!mmc) {
813 pr_debug("Failed to allocate mmc host\n");
814 release_mem_region(res->start, res->end - res->start + 1);
815 return -ENOMEM;
816 }
817
818 mmc->ops = &at91_mci_ops;
819 mmc->f_min = 375000;
820 mmc->f_max = 25000000;
821 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
822 mmc->caps = MMC_CAP_BYTEBLOCK;
823
824 mmc->max_blk_size = 4095;
825 mmc->max_blk_count = mmc->max_req_size;
826
827 host = mmc_priv(mmc);
828 host->mmc = mmc;
829 host->buffer = NULL;
830 host->bus_mode = 0;
831 host->board = pdev->dev.platform_data;
832 if (host->board->wire4) {
833 #ifdef SUPPORT_4WIRE
834 mmc->caps |= MMC_CAP_4_BIT_DATA;
835 #else
836 printk("AT91 MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
837 #endif
838 }
839
840 /*
841 * Get Clock
842 */
843 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
844 if (IS_ERR(host->mci_clk)) {
845 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
846 mmc_free_host(mmc);
847 release_mem_region(res->start, res->end - res->start + 1);
848 return -ENODEV;
849 }
850
851 /*
852 * Map I/O region
853 */
854 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
855 if (!host->baseaddr) {
856 clk_put(host->mci_clk);
857 mmc_free_host(mmc);
858 release_mem_region(res->start, res->end - res->start + 1);
859 return -ENOMEM;
860 }
861
862 /*
863 * Reset hardware
864 */
865 clk_enable(host->mci_clk); /* Enable the peripheral clock */
866 at91_mci_disable(host);
867 at91_mci_enable(host);
868
869 /*
870 * Allocate the MCI interrupt
871 */
872 host->irq = platform_get_irq(pdev, 0);
873 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
874 if (ret) {
875 printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
876 clk_disable(host->mci_clk);
877 clk_put(host->mci_clk);
878 mmc_free_host(mmc);
879 iounmap(host->baseaddr);
880 release_mem_region(res->start, res->end - res->start + 1);
881 return ret;
882 }
883
884 platform_set_drvdata(pdev, mmc);
885
886 /*
887 * Add host to MMC layer
888 */
889 if (host->board->det_pin)
890 host->present = !at91_get_gpio_value(host->board->det_pin);
891 else
892 host->present = -1;
893
894 mmc_add_host(mmc);
895
896 /*
897 * monitor card insertion/removal if we can
898 */
899 if (host->board->det_pin) {
900 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
901 0, DRIVER_NAME, host);
902 if (ret)
903 printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
904 }
905
906 pr_debug("Added MCI driver\n");
907
908 return 0;
909 }
910
911 /*
912 * Remove a device
913 */
/*
 * Remove a device
 *
 * Tears down everything probe set up, in reverse: detect irq, controller,
 * MMC registration, MCI irq, clock, mapping, memory region, host.
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	/* present == -1 is the "no detect pin" sentinel set in probe, so
	 * any other value means the detect irq was requested */
	if (host->present != -1) {
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);			/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}
947
948 #ifdef CONFIG_PM
949 static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
950 {
951 struct mmc_host *mmc = platform_get_drvdata(pdev);
952 int ret = 0;
953
954 if (mmc)
955 ret = mmc_suspend_host(mmc, state);
956
957 return ret;
958 }
959
/* Resume: delegate to the MMC core; nothing to do with no host bound */
static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	if (!mmc)
		return 0;

	return mmc_resume_host(mmc);
}
970 #else
971 #define at91_mci_suspend NULL
972 #define at91_mci_resume NULL
973 #endif
974
/*
 * Platform driver glue. No .probe here: registration goes through
 * platform_driver_probe() in at91_mci_init(), which is why the probe
 * function can live in __init memory.
 */
static struct platform_driver at91_mci_driver = {
	.remove		= __exit_p(at91_mci_remove),
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
984
/* Module entry: register the driver and probe any matching device now */
static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}
989
/* Module exit: unregister the driver (remove handler tears hosts down) */
static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}
994
995 module_init(at91_mci_init);
996 module_exit(at91_mci_exit);
997
998 MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
999 MODULE_AUTHOR("Nick Randell");
1000 MODULE_LICENSE("GPL");