/*
 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
 *
 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 * Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
   This is the AT91 MCI driver that has been tested with both MMC cards
   and SD cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock
     speed required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure.
     This can consist of a processing command and a stop command in the case
     of multiple block transfers.

     There are three main types of request: commands, reads and writes.

     Commands are straightforward.  The command is submitted to the controller
     and the request function returns.  When the controller generates an
     interrupt to indicate the command is finished, the response to the
     command is read and mmc_request_done() is called to end the request.

     Reads and writes work in a similar manner to normal commands but involve
     the PDC (DMA) controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in
     from the request.  Due to a bug in the AT91RM9200 controller, when a read
     is completed, all the words end up byte swapped in the scatterlist
     buffers.  The AT91SAM926x controllers are not affected by this bug.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are first copied
     from the scatterlist into a DMA bounce buffer (in case the source buffer
     is read-only).  The entire write is then performed from this single DMA
     buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>

#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach/mmc.h>
#include <asm/arch/board.h>
#include <asm/arch/cpu.h>
#include <asm/arch/gpio.h>
#include <asm/arch/at91_mci.h>

#define DRIVER_NAME "at91_mci"

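/*
 * Request progress flags kept in host->flags: they record whether the main
 * command and the (optional) stop command have already been issued, so that
 * at91_mci_process_next() knows which step of the request comes next.
 */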
#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

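/*
 * All of the error status bits: they are enabled together with the
 * per-command interrupts in at91_mci_send_command() and checked in the
 * interrupt handler and in at91_mci_completed_command().
 */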
#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))


/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	void __iomem *baseaddr;
	int irq;

	struct at91_mmc_data *board;
	int present;

	struct clk *mci_clk;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int *buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};

/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;
	len = data->sg_len;

	/*
	 * Just loop through all entries. The transfer size might not cover
	 * the entire scatterlist, so make sure that we do not copy too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		}
		else
			memcpy(dmabuf, sgbuffer, amount);

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}

/*
 * Prepare a dma read
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

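	/*
	 * The PDC has a current and a next receive buffer register pair:
	 * iteration i == 0 programs the current buffer (RPR/RCR), i == 1 the
	 * next one (RNPR/RNCR).
	 */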
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}

/*
 * Handle after a dma read
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}

/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	if (cmd->data->flags & MMC_DATA_MULTI) {
		pr_debug("multiple write : wait for BLKE...\n");
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
	} else
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);

	data->bytes_xfered = host->total_length;
}

/* Handle the command-ready (CMDRDY) interrupt after a command has been sent */
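/* Returns 1 when this step of the request has completed, 0 if further interrupts must be handled first */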
static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
{
	if (!host->cmd)
		return 1;
	else if (!host->cmd->data) {
		if (host->flags & FL_SENT_STOP) {
			/* After a multi-block write, we must wait for NOTBUSY */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
		} else return 1;
	} else if (host->cmd->data->flags & MMC_DATA_WRITE) {
		/* After sending a multi-block-write command, start the DMA transfer */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}

	/* command not completed, have to wait */
	return 0;
}


/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	unsigned int mr;

	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
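	/*
	 * 0x34a appears to preset the CLKDIV and PWSDIV fields of the mode
	 * register to a conservative default; the divider actually used is
	 * programmed later by at91_mci_set_ios().
	 */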
	mr = AT91_MCI_PDCMODE | 0x34a;

	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;

	at91_mci_write(host, AT91_MCI_MR, mr);

	/* use Slot A or B (only one at a time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}

/*
 * Send a command
 */
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Needed for leaving busy state before CMD1 */
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}

	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);
		ier = AT91_MCI_CMDRDY;
	} else {
		/* zero block length and PDC mode */
		mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
		at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

		/*
		 * Disable the PDC controller
		 */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

		if (cmdr & AT91_MCI_TRCMD_START) {
			data->bytes_xfered = 0;
			host->transfer_index = 0;
			host->in_use_index = 0;
			if (cmdr & AT91_MCI_TRDIR) {
				/*
				 * Handle a read
				 */
				host->buffer = NULL;
				host->total_length = 0;

				at91_mci_pre_dma_read(host);
				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
			}
			else {
				/*
				 * Handle a write
				 */
				host->total_length = block_length * blocks;
				host->buffer = dma_alloc_coherent(NULL,
						host->total_length,
						&host->physical_address, GFP_KERNEL);

				at91_mci_sg_to_dma(host, data);

				pr_debug("Transmitting %d bytes\n", host->total_length);

				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
				at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
				ier = AT91_MCI_CMDRDY;
			}
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	}

	/* Enable selected interrupts */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}

/*
 * Process the next step in the request
 */
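/*
 * The request is handled as a small state machine: first the main command is
 * sent, then the stop command if the request has one, and finally
 * mmc_request_done() is called.  at91_mci_request() starts the sequence and
 * at91_mci_completed_command() advances it.
 */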
static void at91_mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91_mci_send_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91_mci_send_command(host, host->request->stop);
	}
	else
		mmc_request_done(host->mmc, host->request);
}

/*
 * Handle a command that has been completed
 */
static void at91_mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
			AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
			AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
			cmd->error = MMC_ERR_NONE;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = MMC_ERR_TIMEOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = MMC_ERR_BADCRC;
			else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
				cmd->error = MMC_ERR_FIFO;
			else
				cmd->error = MMC_ERR_FAILED;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = MMC_ERR_NONE;

	at91_mci_process_next(host);
}

/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	at91_mci_process_next(host);
}

/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

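		/*
		 * Worked example (hypothetical numbers): with a 60 MHz master
		 * clock and a 25 MHz request, clkdiv = (60000000 / 25000000) / 2
		 * = 1, so MCCK = 60 MHz / (2 * (1 + 1)) = 15 MHz.
		 */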
		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			 at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				at91_set_gpio_value(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
			case MMC_POWER_ON:
				at91_set_gpio_value(host->board->vcc_pin, 1);
				break;
		}
	}
}

/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("ENDRX\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
			completed = 1;
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = at91_mci_handle_cmdrdy(host);
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91_mci_completed_command(host);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}

static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
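	/*
	 * The card-detect interrupt was requested with the GPIO pin number
	 * (board->det_pin, see at91_mci_probe), so 'irq' doubles as the GPIO
	 * to read for card presence.
	 */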
	int present = !at91_get_gpio_value(irq);

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}

static int at91_mci_get_ro(struct mmc_host *mmc)
{
	int read_only = 0;
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin) {
		read_only = at91_get_gpio_value(host->board->wp_pin);
		printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
				(read_only ? "read-only" : "read-write"));
	}
	else {
		printk(KERN_WARNING "%s: host does not support reading read-only "
				"switch. Assuming write-enabled.\n", mmc_hostname(mmc));
	}
	return read_only;
}

static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};

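/*
 * The MCI platform data (struct at91_mmc_data) comes from the board file.
 * A minimal sketch of what a board might register (the pin assignments are
 * hypothetical placeholders; check the actual board support code and the
 * at91_add_device_mmc() helper it uses):
 *
 *	static struct at91_mmc_data __initdata ek_mmc_data = {
 *		.slot_b		= 0,
 *		.wire4		= 1,
 *		.det_pin	= AT91_PIN_PC9,
 *		.wp_pin		= AT91_PIN_PC4,
 *		.vcc_pin	= 0,
 *	};
 *
 *	at91_add_device_mmc(0, &ek_mmc_data);
 */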
/*
 * Probe for the device
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	pr_debug("Probe MCI devices\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		pr_debug("Failed to allocate mmc host\n");
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENOMEM;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_BYTEBLOCK;

	mmc->max_blk_size = 4095;
	mmc->max_blk_count = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			printk("AT91 MMC: 4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		printk(KERN_ERR "AT91 MMC: no clock defined.\n");
		mmc_free_host(mmc);
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENODEV;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		clk_put(host->mci_clk);
		mmc_free_host(mmc);
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENOMEM;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
	if (ret) {
		printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
		clk_disable(host->mci_clk);
		clk_put(host->mci_clk);
		mmc_free_host(mmc);
		iounmap(host->baseaddr);
		release_mem_region(res->start, res->end - res->start + 1);
		return ret;
	}

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin) {
		host->present = !at91_get_gpio_value(host->board->det_pin);
		device_init_wakeup(&pdev->dev, 1);
	}
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
			0, DRIVER_NAME, host);
		if (ret)
			printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
	}

	pr_debug("Added MCI driver\n");

	return 0;
}

/*
 * Remove a device
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->present != -1) {
		device_init_wakeup(&pdev->dev, 0);
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);		/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}

#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (device_may_wakeup(&pdev->dev))
		enable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (device_may_wakeup(&pdev->dev))
		disable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif

static struct platform_driver at91_mci_driver = {
	.remove		= __exit_p(at91_mci_remove),
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");