1 /*
2 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 /*
14 This is the AT91 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22      This configures the controller, putting it into the correct mode and at the
23      required clock speed.
24
25 MCI REQUEST
26      MCI request processes the commands sent in the mmc_request structure. This
27      can consist of a main command and a stop command in the case of
28      multi-block transfers.
29
30      There are three main types of request: commands, reads and writes.
31
32      Commands are straightforward. The command is submitted to the controller and
33      the request function returns. When the controller generates an interrupt to indicate
34      that the command has finished, the response to the command is read and the
35      mmc_request_done function is called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46      A write is slightly different in that the bytes to write are first copied from the scatterlist
47      into a DMA memory buffer (in case the source buffer is read-only). The
48      entire write is then performed from this single DMA memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54 */
55
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/init.h>
59 #include <linux/ioport.h>
60 #include <linux/platform_device.h>
61 #include <linux/interrupt.h>
62 #include <linux/blkdev.h>
63 #include <linux/delay.h>
64 #include <linux/err.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/clk.h>
67 #include <linux/atmel_pdc.h>
68
69 #include <linux/mmc/host.h>
70
71 #include <asm/io.h>
72 #include <asm/irq.h>
73 #include <asm/mach/mmc.h>
74 #include <asm/arch/board.h>
75 #include <asm/arch/cpu.h>
76 #include <asm/arch/gpio.h>
77 #include <asm/arch/at91_mci.h>
78
79 #define DRIVER_NAME "at91_mci"
80
81 #define FL_SENT_COMMAND (1 << 0)
82 #define FL_SENT_STOP (1 << 1)
83
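/*
 * Status register error bits; any of these terminates the current
 * request in the interrupt handler.
 */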
84 #define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
85 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
86 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
87
88 #define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
89 #define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
90
91
92 /*
93 * Low level type for this driver
94 */
95 struct at91mci_host
96 {
97 struct mmc_host *mmc;
98 struct mmc_command *cmd;
99 struct mmc_request *request;
100
101 void __iomem *baseaddr;
102 int irq;
103
104 struct at91_mmc_data *board;
105 int present;
106
107 struct clk *mci_clk;
108
109 /*
110 * Flag indicating when the command has been sent. This is used to
111      * work out whether or not to send the stop command.
112 */
113 unsigned int flags;
114 /* flag for current bus settings */
115 u32 bus_mode;
116
117 /* DMA buffer used for transmitting */
118 unsigned int* buffer;
119 dma_addr_t physical_address;
120 unsigned int total_length;
121
122 /* Latest in the scatterlist that has been enabled for transfer, but not freed */
123 int in_use_index;
124
125 /* Latest in the scatterlist that has been enabled for transfer */
126 int transfer_index;
127 };
128
129 /*
130 * Copy from sg to a dma block - used for transfers
131 */
132 static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
133 {
134 unsigned int len, i, size;
135 unsigned *dmabuf = host->buffer;
136
137 size = host->total_length;
138 len = data->sg_len;
139
140 /*
141      * Just loop through all entries. The requested size might not
142      * cover the entire scatterlist, so make sure that
143      * we do not transfer too much.
144 */
145 for (i = 0; i < len; i++) {
146 struct scatterlist *sg;
147 int amount;
148 unsigned int *sgbuffer;
149
150 sg = &data->sg[i];
151
152 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
153 amount = min(size, sg->length);
154 size -= amount;
155
156 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
157 int index;
158
159 for (index = 0; index < (amount / 4); index++)
160 *dmabuf++ = swab32(sgbuffer[index]);
161 }
162 else
163 memcpy(dmabuf, sgbuffer, amount);
164
165 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
166
167 if (size == 0)
168 break;
169 }
170
171 /*
172 * Check that we didn't get a request to transfer
173 * more data than can fit into the SG list.
174 */
175 BUG_ON(size != 0);
176 }
177
178 /*
179 * Prepare a dma read
180 */
181 static void at91_mci_pre_dma_read(struct at91mci_host *host)
182 {
183 int i;
184 struct scatterlist *sg;
185 struct mmc_command *cmd;
186 struct mmc_data *data;
187
188 pr_debug("pre dma read\n");
189
190 cmd = host->cmd;
191 if (!cmd) {
192 pr_debug("no command\n");
193 return;
194 }
195
196 data = cmd->data;
197 if (!data) {
198 pr_debug("no data\n");
199 return;
200 }
201
202 for (i = 0; i < 2; i++) {
203 /* nothing left to transfer */
204 if (host->transfer_index >= data->sg_len) {
205 pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
206 break;
207 }
208
209 /* Check to see if this needs filling */
210 if (i == 0) {
211 if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
212 pr_debug("Transfer active in current\n");
213 continue;
214 }
215 }
216 else {
217 if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
218 pr_debug("Transfer active in next\n");
219 continue;
220 }
221 }
222
223 /* Setup the next transfer */
224 pr_debug("Using transfer index %d\n", host->transfer_index);
225
226 sg = &data->sg[host->transfer_index++];
227 pr_debug("sg = %p\n", sg);
228
229 sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);
230
231 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
232
233 if (i == 0) {
234 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
235 at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
236 }
237 else {
238 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
239 at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
240 }
241 }
242
243 pr_debug("pre dma read done\n");
244 }
245
246 /*
247  * Handle the completion of a DMA read
248 */
249 static void at91_mci_post_dma_read(struct at91mci_host *host)
250 {
251 struct mmc_command *cmd;
252 struct mmc_data *data;
253
254 pr_debug("post dma read\n");
255
256 cmd = host->cmd;
257 if (!cmd) {
258 pr_debug("no command\n");
259 return;
260 }
261
262 data = cmd->data;
263 if (!data) {
264 pr_debug("no data\n");
265 return;
266 }
267
268 while (host->in_use_index < host->transfer_index) {
269 struct scatterlist *sg;
270
271 pr_debug("finishing index %d\n", host->in_use_index);
272
273 sg = &data->sg[host->in_use_index++];
274
275 pr_debug("Unmapping page %08X\n", sg->dma_address);
276
277 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
278
279 data->bytes_xfered += sg->length;
280
281 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
282 unsigned int *buffer;
283 int index;
284
285 /* Swap the contents of the buffer */
286 buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
287 pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
288
289 for (index = 0; index < (sg->length / 4); index++)
290 buffer[index] = swab32(buffer[index]);
291
292 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
293 }
294
295 flush_dcache_page(sg->page);
296 }
297
298 /* Is there another transfer to trigger? */
299 if (host->transfer_index < data->sg_len)
300 at91_mci_pre_dma_read(host);
301 else {
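                /*
                 * All scatterlist entries are queued: stop watching ENDRX
                 * and wait for RXBUFF, which signals that the last PDC
                 * receive buffer has been filled.
                 */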
302 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
303 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
304 }
305
306 pr_debug("post dma read done\n");
307 }
308
309 /*
310 * Handle transmitted data
311 */
312 static void at91_mci_handle_transmitted(struct at91mci_host *host)
313 {
314 struct mmc_command *cmd;
315 struct mmc_data *data;
316
317 pr_debug("Handling the transmit\n");
318
319 /* Disable the transfer */
320 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
321
322 /* Now wait for cmd ready */
323 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
324
325 cmd = host->cmd;
326 if (!cmd) return;
327
328 data = cmd->data;
329 if (!data) return;
330
331 if (cmd->data->blocks > 1) {
332 pr_debug("multiple write : wait for BLKE...\n");
333 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
334 } else
335 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
336
337 data->bytes_xfered = host->total_length;
338 }
339
340 /* Handle CMDRDY after a command has been sent */
341 static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
342 {
343 if (!host->cmd)
344 return 1;
345 else if (!host->cmd->data) {
346 if (host->flags & FL_SENT_STOP) {
347                         /* After a multi-block write, we must wait for NOTBUSY */
348 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
349 } else return 1;
350 } else if (host->cmd->data->flags & MMC_DATA_WRITE) {
351                 /* After sending a multi-block write command, start the DMA transfer */
352 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE);
353 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
354 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
355 }
356
357 /* command not completed, have to wait */
358 return 0;
359 }
360
361
362 /*
363 * Enable the controller
364 */
365 static void at91_mci_enable(struct at91mci_host *host)
366 {
367 unsigned int mr;
368
369 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
370 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
371 at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
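        /*
         * Note: 0x34a appears to seed CLKDIV and the power-saving divider
         * with initial values; the real clock divider is programmed later
         * in at91_mci_set_ios().
         */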
372 mr = AT91_MCI_PDCMODE | 0x34a;
373
374 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
375 mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;
376
377 at91_mci_write(host, AT91_MCI_MR, mr);
378
379         /* use Slot A or B (only one at a time) */
380 at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
381 }
382
383 /*
384 * Disable the controller
385 */
386 static void at91_mci_disable(struct at91mci_host *host)
387 {
388 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
389 }
390
391 /*
392 * Send a command
393 */
394 static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
395 {
396 unsigned int cmdr, mr;
397 unsigned int block_length;
398 struct mmc_data *data = cmd->data;
399
400 unsigned int blocks;
401 unsigned int ier = 0;
402
403 host->cmd = cmd;
404
405 /* Needed for leaving busy state before CMD1 */
406 if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
407 pr_debug("Clearing timeout\n");
408 at91_mci_write(host, AT91_MCI_ARGR, 0);
409 at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
410 while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
411 /* spin */
412 pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
413 }
414 }
415
416 cmdr = cmd->opcode;
417
418 if (mmc_resp_type(cmd) == MMC_RSP_NONE)
419 cmdr |= AT91_MCI_RSPTYP_NONE;
420 else {
421                 /* if a response is expected then allow maximum response latency */
422 cmdr |= AT91_MCI_MAXLAT;
423 /* set 136 bit response for R2, 48 bit response otherwise */
424 if (mmc_resp_type(cmd) == MMC_RSP_R2)
425 cmdr |= AT91_MCI_RSPTYP_136;
426 else
427 cmdr |= AT91_MCI_RSPTYP_48;
428 }
429
430 if (data) {
431
432                 if (data->blksz & 0x3) {
433 pr_debug("Unsupported block size\n");
434 cmd->error = -EINVAL;
435 mmc_request_done(host->mmc, host->request);
436 return;
437 }
438
439 block_length = data->blksz;
440 blocks = data->blocks;
441
442 /* always set data start - also set direction flag for read */
443 if (data->flags & MMC_DATA_READ)
444 cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
445 else if (data->flags & MMC_DATA_WRITE)
446 cmdr |= AT91_MCI_TRCMD_START;
447
448 if (data->flags & MMC_DATA_STREAM)
449 cmdr |= AT91_MCI_TRTYP_STREAM;
450 if (data->blocks > 1)
451 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
452 }
453 else {
454 block_length = 0;
455 blocks = 0;
456 }
457
458 if (host->flags & FL_SENT_STOP)
459 cmdr |= AT91_MCI_TRCMD_STOP;
460
461 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
462 cmdr |= AT91_MCI_OPDCMD;
463
464 /*
465 * Set the arguments and send the command
466 */
467 pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
468 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));
469
470 if (!data) {
471 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
472 at91_mci_write(host, ATMEL_PDC_RPR, 0);
473 at91_mci_write(host, ATMEL_PDC_RCR, 0);
474 at91_mci_write(host, ATMEL_PDC_RNPR, 0);
475 at91_mci_write(host, ATMEL_PDC_RNCR, 0);
476 at91_mci_write(host, ATMEL_PDC_TPR, 0);
477 at91_mci_write(host, ATMEL_PDC_TCR, 0);
478 at91_mci_write(host, ATMEL_PDC_TNPR, 0);
479 at91_mci_write(host, ATMEL_PDC_TNCR, 0);
480 ier = AT91_MCI_CMDRDY;
481 } else {
482                 /* clear the block length and PDC mode bits, then set them for this transfer */
483 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
484 at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);
485
486 /*
487 * Disable the PDC controller
488 */
489 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
490
491 if (cmdr & AT91_MCI_TRCMD_START) {
492 data->bytes_xfered = 0;
493 host->transfer_index = 0;
494 host->in_use_index = 0;
495 if (cmdr & AT91_MCI_TRDIR) {
496 /*
497 * Handle a read
498 */
499 host->buffer = NULL;
500 host->total_length = 0;
501
502 at91_mci_pre_dma_read(host);
503 ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
504 }
505 else {
506 /*
507 * Handle a write
508 */
509 host->total_length = block_length * blocks;
510 host->buffer = dma_alloc_coherent(NULL,
511 host->total_length,
512 &host->physical_address, GFP_KERNEL);
513
514 at91_mci_sg_to_dma(host, data);
515
516 pr_debug("Transmitting %d bytes\n", host->total_length);
517
518 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
519 at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
520 ier = AT91_MCI_CMDRDY;
521 }
522 }
523 }
524
525 /*
526 * Send the command and then enable the PDC - not the other way round as
527 * the data sheet says
528 */
529
530 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
531 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
532
533 if (cmdr & AT91_MCI_TRCMD_START) {
534 if (cmdr & AT91_MCI_TRDIR)
535 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
536 }
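        /* For writes, the transmitter is enabled later, from at91_mci_handle_cmdrdy() */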
537
538 /* Enable selected interrupts */
539 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
540 }
541
542 /*
543 * Process the next step in the request
544 */
545 static void at91_mci_process_next(struct at91mci_host *host)
546 {
547 if (!(host->flags & FL_SENT_COMMAND)) {
548 host->flags |= FL_SENT_COMMAND;
549 at91_mci_send_command(host, host->request->cmd);
550 }
551 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
552 host->flags |= FL_SENT_STOP;
553 at91_mci_send_command(host, host->request->stop);
554 }
555 else
556 mmc_request_done(host->mmc, host->request);
557 }
558
559 /*
560 * Handle a command that has been completed
561 */
562 static void at91_mci_completed_command(struct at91mci_host *host)
563 {
564 struct mmc_command *cmd = host->cmd;
565 unsigned int status;
566
567 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
568
569 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
570 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
571 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
572 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
573
574 if (host->buffer) {
575 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
576 host->buffer = NULL;
577 }
578
579 status = at91_mci_read(host, AT91_MCI_SR);
580
581 pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
582 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
583
584 if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
585 AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
586 AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
587 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
588 cmd->error = 0;
589 }
590 else {
591 if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
592 cmd->error = -ETIMEDOUT;
593 else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
594 cmd->error = -EILSEQ;
595 else
596 cmd->error = -EIO;
597
598 pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
599 cmd->error, cmd->opcode, cmd->retries);
600 }
601 }
602 else
603 cmd->error = 0;
604
605 at91_mci_process_next(host);
606 }
607
608 /*
609 * Handle an MMC request
610 */
611 static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
612 {
613 struct at91mci_host *host = mmc_priv(mmc);
614 host->request = mrq;
615 host->flags = 0;
616
617 at91_mci_process_next(host);
618 }
619
620 /*
621 * Set the IOS
622 */
623 static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
624 {
625 int clkdiv;
626 struct at91mci_host *host = mmc_priv(mmc);
627 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
628
629 host->bus_mode = ios->bus_mode;
630
631 if (ios->clock == 0) {
632 /* Disable the MCI controller */
633 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
634 clkdiv = 0;
635 }
636 else {
637 /* Enable the MCI controller */
638 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
639
640 if ((at91_master_clock % (ios->clock * 2)) == 0)
641 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
642 else
643 clkdiv = (at91_master_clock / ios->clock) / 2;
644
645 pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
646 at91_master_clock / (2 * (clkdiv + 1)));
647 }
648 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
649 pr_debug("MMC: Setting controller bus width to 4\n");
650 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
651 }
652 else {
653 pr_debug("MMC: Setting controller bus width to 1\n");
654 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
655 }
656
657 /* Set the clock divider */
658 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
659
660 /* maybe switch power to the card */
661 if (host->board->vcc_pin) {
662 switch (ios->power_mode) {
663 case MMC_POWER_OFF:
664 at91_set_gpio_value(host->board->vcc_pin, 0);
665 break;
666 case MMC_POWER_UP:
667 case MMC_POWER_ON:
668 at91_set_gpio_value(host->board->vcc_pin, 1);
669 break;
670 }
671 }
672 }
673
674 /*
675 * Handle an interrupt
676 */
677 static irqreturn_t at91_mci_irq(int irq, void *devid)
678 {
679 struct at91mci_host *host = devid;
680 int completed = 0;
681 unsigned int int_status, int_mask;
682
683 int_status = at91_mci_read(host, AT91_MCI_SR);
684 int_mask = at91_mci_read(host, AT91_MCI_IMR);
685
686 pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
687 int_status & int_mask);
688
689 int_status = int_status & int_mask;
690
691 if (int_status & AT91_MCI_ERRORS) {
692 completed = 1;
693
694 if (int_status & AT91_MCI_UNRE)
695 pr_debug("MMC: Underrun error\n");
696 if (int_status & AT91_MCI_OVRE)
697 pr_debug("MMC: Overrun error\n");
698 if (int_status & AT91_MCI_DTOE)
699 pr_debug("MMC: Data timeout\n");
700 if (int_status & AT91_MCI_DCRCE)
701 pr_debug("MMC: CRC error in data\n");
702 if (int_status & AT91_MCI_RTOE)
703 pr_debug("MMC: Response timeout\n");
704 if (int_status & AT91_MCI_RENDE)
705 pr_debug("MMC: Response end bit error\n");
706 if (int_status & AT91_MCI_RCRCE)
707 pr_debug("MMC: Response CRC error\n");
708 if (int_status & AT91_MCI_RDIRE)
709 pr_debug("MMC: Response direction error\n");
710 if (int_status & AT91_MCI_RINDE)
711 pr_debug("MMC: Response index error\n");
712 } else {
713 /* Only continue processing if no errors */
714
715 if (int_status & AT91_MCI_TXBUFE) {
716 pr_debug("TX buffer empty\n");
717 at91_mci_handle_transmitted(host);
718 }
719
720 if (int_status & AT91_MCI_ENDRX) {
721 pr_debug("ENDRX\n");
722 at91_mci_post_dma_read(host);
723 }
724
725 if (int_status & AT91_MCI_RXBUFF) {
726 pr_debug("RX buffer full\n");
727 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
728 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
729 completed = 1;
730 }
731
732 if (int_status & AT91_MCI_ENDTX)
733 pr_debug("Transmit has ended\n");
734
735 if (int_status & AT91_MCI_NOTBUSY) {
736 pr_debug("Card is ready\n");
737 completed = 1;
738 }
739
740 if (int_status & AT91_MCI_DTIP)
741 pr_debug("Data transfer in progress\n");
742
743 if (int_status & AT91_MCI_BLKE) {
744 pr_debug("Block transfer has ended\n");
745 completed = 1;
746 }
747
748 if (int_status & AT91_MCI_TXRDY)
749 pr_debug("Ready to transmit\n");
750
751 if (int_status & AT91_MCI_RXRDY)
752 pr_debug("Ready to receive\n");
753
754 if (int_status & AT91_MCI_CMDRDY) {
755 pr_debug("Command ready\n");
756 completed = at91_mci_handle_cmdrdy(host);
757 }
758 }
759
760 if (completed) {
761 pr_debug("Completed command\n");
762 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
763 at91_mci_completed_command(host);
764 } else
765 at91_mci_write(host, AT91_MCI_IDR, int_status);
766
767 return IRQ_HANDLED;
768 }
769
770 static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
771 {
772 struct at91mci_host *host = _host;
773 int present = !at91_get_gpio_value(irq);
774
775 /*
776 * we expect this irq on both insert and remove,
777 * and use a short delay to debounce.
778 */
779 if (present != host->present) {
780 host->present = present;
781 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
782 present ? "insert" : "remove");
783 if (!present) {
784 pr_debug("****** Resetting SD-card bus width ******\n");
785 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
786 }
787 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
788 }
789 return IRQ_HANDLED;
790 }
791
792 static int at91_mci_get_ro(struct mmc_host *mmc)
793 {
794 int read_only = 0;
795 struct at91mci_host *host = mmc_priv(mmc);
796
797 if (host->board->wp_pin) {
798 read_only = at91_get_gpio_value(host->board->wp_pin);
799 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
800 (read_only ? "read-only" : "read-write") );
801 }
802 else {
803 printk(KERN_WARNING "%s: host does not support reading read-only "
804 "switch. Assuming write-enable.\n", mmc_hostname(mmc));
805 }
806 return read_only;
807 }
808
809 static const struct mmc_host_ops at91_mci_ops = {
810 .request = at91_mci_request,
811 .set_ios = at91_mci_set_ios,
812 .get_ro = at91_mci_get_ro,
813 };
814
815 /*
816 * Probe for the device
817 */
818 static int __init at91_mci_probe(struct platform_device *pdev)
819 {
820 struct mmc_host *mmc;
821 struct at91mci_host *host;
822 struct resource *res;
823 int ret;
824
825 pr_debug("Probe MCI devices\n");
826
827 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
828 if (!res)
829 return -ENXIO;
830
831 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
832 return -EBUSY;
833
834 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
835 if (!mmc) {
836 pr_debug("Failed to allocate mmc host\n");
837 release_mem_region(res->start, res->end - res->start + 1);
838 return -ENOMEM;
839 }
840
841 mmc->ops = &at91_mci_ops;
842 mmc->f_min = 375000;
843 mmc->f_max = 25000000;
844 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
845
846 mmc->max_blk_size = 4095;
847 mmc->max_blk_count = mmc->max_req_size;
848
849 host = mmc_priv(mmc);
850 host->mmc = mmc;
851 host->buffer = NULL;
852 host->bus_mode = 0;
853 host->board = pdev->dev.platform_data;
854 if (host->board->wire4) {
855 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
856 mmc->caps |= MMC_CAP_4_BIT_DATA;
857 else
858                         printk(KERN_WARNING "AT91 MMC: 4 wire bus mode not supported"
859 " - using 1 wire\n");
860 }
861
862 /*
863 * Get Clock
864 */
865 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
866 if (IS_ERR(host->mci_clk)) {
867 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
868 mmc_free_host(mmc);
869 release_mem_region(res->start, res->end - res->start + 1);
870 return -ENODEV;
871 }
872
873 /*
874 * Map I/O region
875 */
876 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
877 if (!host->baseaddr) {
878 clk_put(host->mci_clk);
879 mmc_free_host(mmc);
880 release_mem_region(res->start, res->end - res->start + 1);
881 return -ENOMEM;
882 }
883
884 /*
885 * Reset hardware
886 */
887 clk_enable(host->mci_clk); /* Enable the peripheral clock */
888 at91_mci_disable(host);
889 at91_mci_enable(host);
890
891 /*
892 * Allocate the MCI interrupt
893 */
894 host->irq = platform_get_irq(pdev, 0);
895 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
896 if (ret) {
897 printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
898 clk_disable(host->mci_clk);
899 clk_put(host->mci_clk);
900 mmc_free_host(mmc);
901 iounmap(host->baseaddr);
902 release_mem_region(res->start, res->end - res->start + 1);
903 return ret;
904 }
905
906 platform_set_drvdata(pdev, mmc);
907
908 /*
909 * Add host to MMC layer
910 */
911 if (host->board->det_pin) {
912 host->present = !at91_get_gpio_value(host->board->det_pin);
913 device_init_wakeup(&pdev->dev, 1);
914 }
915 else
916 host->present = -1;
917
918 mmc_add_host(mmc);
919
920 /*
921 * monitor card insertion/removal if we can
922 */
923 if (host->board->det_pin) {
924 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
925 0, DRIVER_NAME, host);
926 if (ret)
927 printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
928 }
929
930 pr_debug("Added MCI driver\n");
931
932 return 0;
933 }
934
935 /*
936 * Remove a device
937 */
938 static int __exit at91_mci_remove(struct platform_device *pdev)
939 {
940 struct mmc_host *mmc = platform_get_drvdata(pdev);
941 struct at91mci_host *host;
942 struct resource *res;
943
944 if (!mmc)
945 return -1;
946
947 host = mmc_priv(mmc);
948
949 if (host->board->det_pin) {
950 device_init_wakeup(&pdev->dev, 0);
951 free_irq(host->board->det_pin, host);
952 cancel_delayed_work(&host->mmc->detect);
953 }
954
955 at91_mci_disable(host);
956 mmc_remove_host(mmc);
957 free_irq(host->irq, host);
958
959 clk_disable(host->mci_clk); /* Disable the peripheral clock */
960 clk_put(host->mci_clk);
961
962 iounmap(host->baseaddr);
963 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
964 release_mem_region(res->start, res->end - res->start + 1);
965
966 mmc_free_host(mmc);
967 platform_set_drvdata(pdev, NULL);
968 pr_debug("MCI Removed\n");
969
970 return 0;
971 }
972
973 #ifdef CONFIG_PM
974 static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
975 {
976 struct mmc_host *mmc = platform_get_drvdata(pdev);
977 struct at91mci_host *host = mmc_priv(mmc);
978 int ret = 0;
979
980 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
981 enable_irq_wake(host->board->det_pin);
982
983 if (mmc)
984 ret = mmc_suspend_host(mmc, state);
985
986 return ret;
987 }
988
989 static int at91_mci_resume(struct platform_device *pdev)
990 {
991 struct mmc_host *mmc = platform_get_drvdata(pdev);
992 struct at91mci_host *host = mmc_priv(mmc);
993 int ret = 0;
994
995 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
996 disable_irq_wake(host->board->det_pin);
997
998 if (mmc)
999 ret = mmc_resume_host(mmc);
1000
1001 return ret;
1002 }
1003 #else
1004 #define at91_mci_suspend NULL
1005 #define at91_mci_resume NULL
1006 #endif
1007
1008 static struct platform_driver at91_mci_driver = {
1009 .remove = __exit_p(at91_mci_remove),
1010 .suspend = at91_mci_suspend,
1011 .resume = at91_mci_resume,
1012 .driver = {
1013 .name = DRIVER_NAME,
1014 .owner = THIS_MODULE,
1015 },
1016 };
1017
1018 static int __init at91_mci_init(void)
1019 {
1020 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
1021 }
1022
1023 static void __exit at91_mci_exit(void)
1024 {
1025 platform_driver_unregister(&at91_mci_driver);
1026 }
1027
1028 module_init(at91_mci_init);
1029 module_exit(at91_mci_exit);
1030
1031 MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
1032 MODULE_AUTHOR("Nick Randell");
1033 MODULE_LICENSE("GPL");