1 /*
2 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 /*
14 This is the AT91 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the controller with the required bus mode, bus width, clock
23 speed and card power.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a data command followed by a stop command in the case of
28 multi-block transfers.
29
30 There are three main types of request: commands, reads and writes.
31
32 Commands are straightforward. The command is submitted to the controller and
33 the request function returns. When the controller raises an interrupt to indicate
34 that the command has finished, the response is read and mmc_request_done()
35 is called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly into the scatterlist passed in from the request.
41 Due to a bug in the AT91RM9200 controller, when a read completes, all the words in the
42 scatterlist buffers are byte-swapped. AT91SAM926x controllers are not affected by this bug.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are first copied from the scatterlist
47 into a DMA memory buffer (in case the source buffer is read-only). The entire
48 write is then performed from this single DMA buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54 */
55
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/init.h>
59 #include <linux/ioport.h>
60 #include <linux/platform_device.h>
61 #include <linux/interrupt.h>
62 #include <linux/blkdev.h>
63 #include <linux/delay.h>
64 #include <linux/err.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/clk.h>
67 #include <linux/atmel_pdc.h>
68
69 #include <linux/mmc/host.h>
70
71 #include <asm/io.h>
72 #include <asm/irq.h>
73 #include <asm/gpio.h>
74
75 #include <asm/mach/mmc.h>
76 #include <asm/arch/board.h>
77 #include <asm/arch/cpu.h>
78 #include <asm/arch/at91_mci.h>
79
80 #define DRIVER_NAME "at91_mci"
81
82 #define FL_SENT_COMMAND (1 << 0)
83 #define FL_SENT_STOP (1 << 1)
84
85 #define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
86 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
87 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
88
89 #define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
90 #define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
91
92
93 /*
94 * Low level type for this driver
95 */
96 struct at91mci_host
97 {
98 struct mmc_host *mmc;
99 struct mmc_command *cmd;
100 struct mmc_request *request;
101
102 void __iomem *baseaddr;
103 int irq;
104
105 struct at91_mmc_data *board;
106 int present;
107
108 struct clk *mci_clk;
109
110 /*
111 * Flag indicating when the command has been sent. This is used to
112 * work out whether or not to send the stop
113 */
114 unsigned int flags;
115 /* flag for current bus settings */
116 u32 bus_mode;
117
118 /* DMA buffer used for transmitting */
119 unsigned int* buffer;
120 dma_addr_t physical_address;
121 unsigned int total_length;
122
123 /* Index of the next scatterlist entry to be completed (unmapped) after transfer */
124 int in_use_index;
125 
126 /* Index of the next scatterlist entry to be programmed into the PDC */
127 int transfer_index;
128 };
129
130 /*
131 * Copy from sg to a dma block - used for transfers
132 */
133 static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
134 {
135 unsigned int len, i, size;
136 unsigned *dmabuf = host->buffer;
137
138 size = host->total_length;
139 len = data->sg_len;
140
141 /*
142 * Just loop through all entries. The requested size might be
143 * smaller than the whole scatterlist, so make sure we do not
144 * copy too much.
145 */
146 for (i = 0; i < len; i++) {
147 struct scatterlist *sg;
148 int amount;
149 unsigned int *sgbuffer;
150
151 sg = &data->sg[i];
152
153 sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
154 amount = min(size, sg->length);
155 size -= amount;
156
157 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
158 int index;
159
160 for (index = 0; index < (amount / 4); index++)
161 *dmabuf++ = swab32(sgbuffer[index]);
162 } else {
163 memcpy(dmabuf, sgbuffer, amount);
164 dmabuf = (unsigned *)((char *)dmabuf + amount); /* advance past the copied bytes */
165 }
166 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
167
168 if (size == 0)
169 break;
170 }
171
172 /*
173 * Check that we didn't get a request to transfer
174 * more data than can fit into the SG list.
175 */
176 BUG_ON(size != 0);
177 }
178
179 /*
180 * Prepare a dma read
181 */
182 static void at91_mci_pre_dma_read(struct at91mci_host *host)
183 {
184 int i;
185 struct scatterlist *sg;
186 struct mmc_command *cmd;
187 struct mmc_data *data;
188
189 pr_debug("pre dma read\n");
190
191 cmd = host->cmd;
192 if (!cmd) {
193 pr_debug("no command\n");
194 return;
195 }
196
197 data = cmd->data;
198 if (!data) {
199 pr_debug("no data\n");
200 return;
201 }
202
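/*
* The PDC has two receive buffer slots: the current one (RPR/RCR) and the
* next one (RNPR/RNCR). Try to keep both programmed so reception can
* continue without a gap between scatterlist entries.
*/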
203 for (i = 0; i < 2; i++) {
204 /* nothing left to transfer */
205 if (host->transfer_index >= data->sg_len) {
206 pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
207 break;
208 }
209
210 /* Check to see if this needs filling */
211 if (i == 0) {
212 if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
213 pr_debug("Transfer active in current\n");
214 continue;
215 }
216 }
217 else {
218 if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
219 pr_debug("Transfer active in next\n");
220 continue;
221 }
222 }
223
224 /* Setup the next transfer */
225 pr_debug("Using transfer index %d\n", host->transfer_index);
226
227 sg = &data->sg[host->transfer_index++];
228 pr_debug("sg = %p\n", sg);
229
230 sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);
231
232 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
233
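/* PDC counters are programmed in 32-bit words, hence length / 4 */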
234 if (i == 0) {
235 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
236 at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
237 }
238 else {
239 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
240 at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
241 }
242 }
243
244 pr_debug("pre dma read done\n");
245 }
246
247 /*
248 * Handle after a dma read
249 */
250 static void at91_mci_post_dma_read(struct at91mci_host *host)
251 {
252 struct mmc_command *cmd;
253 struct mmc_data *data;
254
255 pr_debug("post dma read\n");
256
257 cmd = host->cmd;
258 if (!cmd) {
259 pr_debug("no command\n");
260 return;
261 }
262
263 data = cmd->data;
264 if (!data) {
265 pr_debug("no data\n");
266 return;
267 }
268
269 while (host->in_use_index < host->transfer_index) {
270 struct scatterlist *sg;
271
272 pr_debug("finishing index %d\n", host->in_use_index);
273
274 sg = &data->sg[host->in_use_index++];
275
276 pr_debug("Unmapping page %08X\n", sg->dma_address);
277
278 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
279
280 data->bytes_xfered += sg->length;
281
282 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
283 unsigned int *buffer;
284 int index;
285
286 /* Swap the contents of the buffer */
287 buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
288 pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
289
290 for (index = 0; index < (sg->length / 4); index++)
291 buffer[index] = swab32(buffer[index]);
292
293 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
294 }
295
296 flush_dcache_page(sg_page(sg));
297 }
298
299 /* Is there another transfer to trigger? */
300 if (host->transfer_index < data->sg_len)
301 at91_mci_pre_dma_read(host);
302 else {
303 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
304 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
305 }
306
307 pr_debug("post dma read done\n");
308 }
309
310 /*
311 * Handle transmitted data
312 */
313 static void at91_mci_handle_transmitted(struct at91mci_host *host)
314 {
315 struct mmc_command *cmd;
316 struct mmc_data *data;
317
318 pr_debug("Handling the transmit\n");
319
320 /* Disable the transfer */
321 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
322
323 /* The transmit is done: mask TXBUFE and wait for BLKE or NOTBUSY below */
324 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
325
326 cmd = host->cmd;
327 if (!cmd) return;
328
329 data = cmd->data;
330 if (!data) return;
331
332 if (cmd->data->blocks > 1) {
333 pr_debug("multiple write : wait for BLKE...\n");
334 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
335 } else
336 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
337
338 data->bytes_xfered = host->total_length;
339 }
340
341 /* Handle CMDRDY: returns 1 if this step of the request is complete, 0 if more interrupts are expected */
342 static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
343 {
344 if (!host->cmd)
345 return 1;
346 else if (!host->cmd->data) {
347 if (host->flags & FL_SENT_STOP) {
348 /* After a multi-block write, we must wait for NOTBUSY */
349 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
350 } else return 1;
351 } else if (host->cmd->data->flags & MMC_DATA_WRITE) {
352 /* After sending the write command, start the PDC (DMA) transfer */
353 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE);
354 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
355 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
356 }
357
358 /* command not completed, have to wait */
359 return 0;
360 }
361
362
363 /*
364 * Enable the controller
365 */
366 static void at91_mci_enable(struct at91mci_host *host)
367 {
368 unsigned int mr;
369
370 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
371 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
372 at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
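/* 0x34a fills in default CLKDIV and PWSDIV fields of MCI_MR; the clock divider is reprogrammed later by set_ios */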
373 mr = AT91_MCI_PDCMODE | 0x34a;
374
375 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
376 mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;
377
378 at91_mci_write(host, AT91_MCI_MR, mr);
379
380 /* use slot A or B (only one at a time) */
381 at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
382 }
383
384 /*
385 * Disable the controller
386 */
387 static void at91_mci_disable(struct at91mci_host *host)
388 {
389 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
390 }
391
392 /*
393 * Send a command
394 */
395 static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
396 {
397 unsigned int cmdr, mr;
398 unsigned int block_length;
399 struct mmc_data *data = cmd->data;
400
401 unsigned int blocks;
402 unsigned int ier = 0;
403
404 host->cmd = cmd;
405
406 /* Needed for leaving busy state before CMD1 */
407 if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
408 pr_debug("Clearing timeout\n");
409 at91_mci_write(host, AT91_MCI_ARGR, 0);
410 at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
411 while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
412 /* spin */
413 pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
414 }
415 }
416
417 cmdr = cmd->opcode;
418
419 if (mmc_resp_type(cmd) == MMC_RSP_NONE)
420 cmdr |= AT91_MCI_RSPTYP_NONE;
421 else {
422 /* if a response is expected then allow maximum response latency */
423 cmdr |= AT91_MCI_MAXLAT;
424 /* set 136 bit response for R2, 48 bit response otherwise */
425 if (mmc_resp_type(cmd) == MMC_RSP_R2)
426 cmdr |= AT91_MCI_RSPTYP_136;
427 else
428 cmdr |= AT91_MCI_RSPTYP_48;
429 }
430
431 if (data) {
432
433 if ( data->blksz & 0x3 ) {
434 pr_debug("Unsupported block size\n");
435 cmd->error = -EINVAL;
436 mmc_request_done(host->mmc, host->request);
437 return;
438 }
439
440 block_length = data->blksz;
441 blocks = data->blocks;
442
443 /* always set data start - also set direction flag for read */
444 if (data->flags & MMC_DATA_READ)
445 cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
446 else if (data->flags & MMC_DATA_WRITE)
447 cmdr |= AT91_MCI_TRCMD_START;
448
449 if (data->flags & MMC_DATA_STREAM)
450 cmdr |= AT91_MCI_TRTYP_STREAM;
451 if (data->blocks > 1)
452 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
453 }
454 else {
455 block_length = 0;
456 blocks = 0;
457 }
458
459 if (host->flags & FL_SENT_STOP)
460 cmdr |= AT91_MCI_TRCMD_STOP;
461
462 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
463 cmdr |= AT91_MCI_OPDCMD;
464
465 /*
466 * Set the arguments and send the command
467 */
468 pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
469 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));
470
471 if (!data) {
472 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
473 at91_mci_write(host, ATMEL_PDC_RPR, 0);
474 at91_mci_write(host, ATMEL_PDC_RCR, 0);
475 at91_mci_write(host, ATMEL_PDC_RNPR, 0);
476 at91_mci_write(host, ATMEL_PDC_RNCR, 0);
477 at91_mci_write(host, ATMEL_PDC_TPR, 0);
478 at91_mci_write(host, ATMEL_PDC_TCR, 0);
479 at91_mci_write(host, ATMEL_PDC_TNPR, 0);
480 at91_mci_write(host, ATMEL_PDC_TNCR, 0);
481 ier = AT91_MCI_CMDRDY;
482 } else {
483 /* clear the block length field and the PDC mode bit before setting them below */
484 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
485 at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);
486
487 /*
488 * Disable the PDC controller
489 */
490 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
491
492 if (cmdr & AT91_MCI_TRCMD_START) {
493 data->bytes_xfered = 0;
494 host->transfer_index = 0;
495 host->in_use_index = 0;
496 if (cmdr & AT91_MCI_TRDIR) {
497 /*
498 * Handle a read
499 */
500 host->buffer = NULL;
501 host->total_length = 0;
502
503 at91_mci_pre_dma_read(host);
504 ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
505 }
506 else {
507 /*
508 * Handle a write
509 */
510 host->total_length = block_length * blocks;
511 host->buffer = dma_alloc_coherent(NULL,
512 host->total_length,
513 &host->physical_address, GFP_KERNEL);
514
515 at91_mci_sg_to_dma(host, data);
516
517 pr_debug("Transmitting %d bytes\n", host->total_length);
518
519 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
520 at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
521 ier = AT91_MCI_CMDRDY;
522 }
523 }
524 }
525
526 /*
527 * Send the command and then enable the PDC - not the other way round as
528 * the data sheet says
529 */
530
531 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
532 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
533
534 if (cmdr & AT91_MCI_TRCMD_START) {
535 if (cmdr & AT91_MCI_TRDIR)
536 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
537 }
538
539 /* Enable selected interrupts */
540 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
541 }
542
543 /*
544 * Process the next step in the request
545 */
546 static void at91_mci_process_next(struct at91mci_host *host)
547 {
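/* Three-step sequence: send the command, then the stop command (if any), then finish the request */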
548 if (!(host->flags & FL_SENT_COMMAND)) {
549 host->flags |= FL_SENT_COMMAND;
550 at91_mci_send_command(host, host->request->cmd);
551 }
552 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
553 host->flags |= FL_SENT_STOP;
554 at91_mci_send_command(host, host->request->stop);
555 }
556 else
557 mmc_request_done(host->mmc, host->request);
558 }
559
560 /*
561 * Handle a command that has been completed
562 */
563 static void at91_mci_completed_command(struct at91mci_host *host)
564 {
565 struct mmc_command *cmd = host->cmd;
566 unsigned int status;
567
568 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
569
570 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
571 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
572 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
573 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
574
575 if (host->buffer) {
576 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
577 host->buffer = NULL;
578 }
579
580 status = at91_mci_read(host, AT91_MCI_SR);
581
582 pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
583 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
584
585 if (status & AT91_MCI_ERRORS) {
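/* Responses without a CRC (e.g. R3) legitimately trigger RCRCE, so ignore it when the core marks the response as not CRC-protected */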
586 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
587 cmd->error = 0;
588 }
589 else {
590 if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
591 cmd->error = -ETIMEDOUT;
592 else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
593 cmd->error = -EILSEQ;
594 else
595 cmd->error = -EIO;
596
597 pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
598 cmd->error, cmd->opcode, cmd->retries);
599 }
600 }
601 else
602 cmd->error = 0;
603
604 at91_mci_process_next(host);
605 }
606
607 /*
608 * Handle an MMC request
609 */
610 static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
611 {
612 struct at91mci_host *host = mmc_priv(mmc);
613 host->request = mrq;
614 host->flags = 0;
615
616 at91_mci_process_next(host);
617 }
618
619 /*
620 * Set the IOS
621 */
622 static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
623 {
624 int clkdiv;
625 struct at91mci_host *host = mmc_priv(mmc);
626 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
627
628 host->bus_mode = ios->bus_mode;
629
630 if (ios->clock == 0) {
631 /* Disable the MCI controller */
632 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
633 clkdiv = 0;
634 }
635 else {
636 /* Enable the MCI controller */
637 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
638
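/* MCI clock = master clock / (2 * (CLKDIV + 1)); when the division is not exact, round the divider up so the card clock never exceeds the requested rate */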
639 if ((at91_master_clock % (ios->clock * 2)) == 0)
640 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
641 else
642 clkdiv = (at91_master_clock / ios->clock) / 2;
643
644 pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
645 at91_master_clock / (2 * (clkdiv + 1)));
646 }
647 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
648 pr_debug("MMC: Setting controller bus width to 4\n");
649 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
650 }
651 else {
652 pr_debug("MMC: Setting controller bus width to 1\n");
653 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
654 }
655
656 /* Set the clock divider */
657 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
658
659 /* maybe switch power to the card */
660 if (host->board->vcc_pin) {
661 switch (ios->power_mode) {
662 case MMC_POWER_OFF:
663 gpio_set_value(host->board->vcc_pin, 0);
664 break;
665 case MMC_POWER_UP:
666 case MMC_POWER_ON:
667 gpio_set_value(host->board->vcc_pin, 1);
668 break;
669 }
670 }
671 }
672
673 /*
674 * Handle an interrupt
675 */
676 static irqreturn_t at91_mci_irq(int irq, void *devid)
677 {
678 struct at91mci_host *host = devid;
679 int completed = 0;
680 unsigned int int_status, int_mask;
681
682 int_status = at91_mci_read(host, AT91_MCI_SR);
683 int_mask = at91_mci_read(host, AT91_MCI_IMR);
684
685 pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
686 int_status & int_mask);
687
688 int_status = int_status & int_mask;
689
690 if (int_status & AT91_MCI_ERRORS) {
691 completed = 1;
692
693 if (int_status & AT91_MCI_UNRE)
694 pr_debug("MMC: Underrun error\n");
695 if (int_status & AT91_MCI_OVRE)
696 pr_debug("MMC: Overrun error\n");
697 if (int_status & AT91_MCI_DTOE)
698 pr_debug("MMC: Data timeout\n");
699 if (int_status & AT91_MCI_DCRCE)
700 pr_debug("MMC: CRC error in data\n");
701 if (int_status & AT91_MCI_RTOE)
702 pr_debug("MMC: Response timeout\n");
703 if (int_status & AT91_MCI_RENDE)
704 pr_debug("MMC: Response end bit error\n");
705 if (int_status & AT91_MCI_RCRCE)
706 pr_debug("MMC: Response CRC error\n");
707 if (int_status & AT91_MCI_RDIRE)
708 pr_debug("MMC: Response direction error\n");
709 if (int_status & AT91_MCI_RINDE)
710 pr_debug("MMC: Response index error\n");
711 } else {
712 /* Only continue processing if no errors */
713
714 if (int_status & AT91_MCI_TXBUFE) {
715 pr_debug("TX buffer empty\n");
716 at91_mci_handle_transmitted(host);
717 }
718
719 if (int_status & AT91_MCI_ENDRX) {
720 pr_debug("ENDRX\n");
721 at91_mci_post_dma_read(host);
722 }
723
724 if (int_status & AT91_MCI_RXBUFF) {
725 pr_debug("RX buffer full\n");
726 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
727 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
728 completed = 1;
729 }
730
731 if (int_status & AT91_MCI_ENDTX)
732 pr_debug("Transmit has ended\n");
733
734 if (int_status & AT91_MCI_NOTBUSY) {
735 pr_debug("Card is ready\n");
736 completed = 1;
737 }
738
739 if (int_status & AT91_MCI_DTIP)
740 pr_debug("Data transfer in progress\n");
741
742 if (int_status & AT91_MCI_BLKE) {
743 pr_debug("Block transfer has ended\n");
744 completed = 1;
745 }
746
747 if (int_status & AT91_MCI_TXRDY)
748 pr_debug("Ready to transmit\n");
749
750 if (int_status & AT91_MCI_RXRDY)
751 pr_debug("Ready to receive\n");
752
753 if (int_status & AT91_MCI_CMDRDY) {
754 pr_debug("Command ready\n");
755 completed = at91_mci_handle_cmdrdy(host);
756 }
757 }
758
759 if (completed) {
760 pr_debug("Completed command\n");
761 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
762 at91_mci_completed_command(host);
763 } else
764 at91_mci_write(host, AT91_MCI_IDR, int_status);
765
766 return IRQ_HANDLED;
767 }
768
769 static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
770 {
771 struct at91mci_host *host = _host;
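/* a low level on the card-detect GPIO means a card is present */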
772 int present = !gpio_get_value(irq_to_gpio(irq));
773
774 /*
775 * we expect this irq on both insert and remove,
776 * and use a short delay to debounce.
777 */
778 if (present != host->present) {
779 host->present = present;
780 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
781 present ? "insert" : "remove");
782 if (!present) {
783 pr_debug("****** Resetting SD-card bus width ******\n");
784 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
785 }
786 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
787 }
788 return IRQ_HANDLED;
789 }
790
791 static int at91_mci_get_ro(struct mmc_host *mmc)
792 {
793 int read_only = 0;
794 struct at91mci_host *host = mmc_priv(mmc);
795
796 if (host->board->wp_pin) {
797 read_only = gpio_get_value(host->board->wp_pin);
798 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
799 (read_only ? "read-only" : "read-write") );
800 }
801 else {
802 printk(KERN_WARNING "%s: host does not support reading read-only "
803 "switch. Assuming write-enable.\n", mmc_hostname(mmc));
804 }
805 return read_only;
806 }
807
808 static const struct mmc_host_ops at91_mci_ops = {
809 .request = at91_mci_request,
810 .set_ios = at91_mci_set_ios,
811 .get_ro = at91_mci_get_ro,
812 };
813
814 /*
815 * Probe for the device
816 */
817 static int __init at91_mci_probe(struct platform_device *pdev)
818 {
819 struct mmc_host *mmc;
820 struct at91mci_host *host;
821 struct resource *res;
822 int ret;
823
824 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
825 if (!res)
826 return -ENXIO;
827
828 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
829 return -EBUSY;
830
831 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
832 if (!mmc) {
833 ret = -ENOMEM;
834 dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
835 goto fail6;
836 }
837
838 mmc->ops = &at91_mci_ops;
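/* 375 kHz keeps card identification below the 400 kHz limit; 25 MHz is the SD/MMC full-speed ceiling */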
839 mmc->f_min = 375000;
840 mmc->f_max = 25000000;
841 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
842
843 mmc->max_blk_size = 4095;
844 mmc->max_blk_count = mmc->max_req_size;
845
846 host = mmc_priv(mmc);
847 host->mmc = mmc;
848 host->buffer = NULL;
849 host->bus_mode = 0;
850 host->board = pdev->dev.platform_data;
851 if (host->board->wire4) {
852 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
853 mmc->caps |= MMC_CAP_4_BIT_DATA;
854 else
855 dev_warn(&pdev->dev, "4 wire bus mode not supported"
856 " - using 1 wire\n");
857 }
858
859 /*
860 * Reserve GPIOs ... board init code makes sure these pins are set
861 * up as GPIOs with the right direction (input, except for vcc)
862 */
863 if (host->board->det_pin) {
864 ret = gpio_request(host->board->det_pin, "mmc_detect");
865 if (ret < 0) {
866 dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
867 goto fail5;
868 }
869 }
870 if (host->board->wp_pin) {
871 ret = gpio_request(host->board->wp_pin, "mmc_wp");
872 if (ret < 0) {
873 dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
874 goto fail4;
875 }
876 }
877 if (host->board->vcc_pin) {
878 ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
879 if (ret < 0) {
880 dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
881 goto fail3;
882 }
883 }
884
885 /*
886 * Get Clock
887 */
888 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
889 if (IS_ERR(host->mci_clk)) {
890 ret = -ENODEV;
891 dev_dbg(&pdev->dev, "no mci_clk?\n");
892 goto fail2;
893 }
894
895 /*
896 * Map I/O region
897 */
898 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
899 if (!host->baseaddr) {
900 ret = -ENOMEM;
901 goto fail1;
902 }
903
904 /*
905 * Reset hardware
906 */
907 clk_enable(host->mci_clk); /* Enable the peripheral clock */
908 at91_mci_disable(host);
909 at91_mci_enable(host);
910
911 /*
912 * Allocate the MCI interrupt
913 */
914 host->irq = platform_get_irq(pdev, 0);
915 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
916 mmc_hostname(mmc), host);
917 if (ret) {
918 dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
919 goto fail0;
920 }
921
922 platform_set_drvdata(pdev, mmc);
923
924 /*
925 * Add host to MMC layer
926 */
927 if (host->board->det_pin) {
928 host->present = !gpio_get_value(host->board->det_pin);
929 }
930 else
931 host->present = -1;
932
933 mmc_add_host(mmc);
934
935 /*
936 * monitor card insertion/removal if we can
937 */
938 if (host->board->det_pin) {
939 ret = request_irq(gpio_to_irq(host->board->det_pin),
940 at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
941 if (ret)
942 dev_warn(&pdev->dev, "request MMC detect irq failed\n");
943 else
944 device_init_wakeup(&pdev->dev, 1);
945 }
946
947 pr_debug("Added MCI driver\n");
948
949 return 0;
950
951 fail0:
952 clk_disable(host->mci_clk);
953 iounmap(host->baseaddr);
954 fail1:
955 clk_put(host->mci_clk);
956 fail2:
957 if (host->board->vcc_pin)
958 gpio_free(host->board->vcc_pin);
959 fail3:
960 if (host->board->wp_pin)
961 gpio_free(host->board->wp_pin);
962 fail4:
963 if (host->board->det_pin)
964 gpio_free(host->board->det_pin);
965 fail5:
966 mmc_free_host(mmc);
967 fail6:
968 release_mem_region(res->start, res->end - res->start + 1);
969 dev_err(&pdev->dev, "probe failed, err %d\n", ret);
970 return ret;
971 }
972
973 /*
974 * Remove a device
975 */
976 static int __exit at91_mci_remove(struct platform_device *pdev)
977 {
978 struct mmc_host *mmc = platform_get_drvdata(pdev);
979 struct at91mci_host *host;
980 struct resource *res;
981
982 if (!mmc)
983 return -1;
984
985 host = mmc_priv(mmc);
986
987 if (host->board->det_pin) {
988 if (device_can_wakeup(&pdev->dev))
989 free_irq(gpio_to_irq(host->board->det_pin), host);
990 device_init_wakeup(&pdev->dev, 0);
991 gpio_free(host->board->det_pin);
992 }
993
994 at91_mci_disable(host);
995 mmc_remove_host(mmc);
996 free_irq(host->irq, host);
997
998 clk_disable(host->mci_clk); /* Disable the peripheral clock */
999 clk_put(host->mci_clk);
1000
1001 if (host->board->vcc_pin)
1002 gpio_free(host->board->vcc_pin);
1003 if (host->board->wp_pin)
1004 gpio_free(host->board->wp_pin);
1005
1006 iounmap(host->baseaddr);
1007 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1008 release_mem_region(res->start, res->end - res->start + 1);
1009
1010 mmc_free_host(mmc);
1011 platform_set_drvdata(pdev, NULL);
1012 pr_debug("MCI Removed\n");
1013
1014 return 0;
1015 }
1016
1017 #ifdef CONFIG_PM
1018 static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
1019 {
1020 struct mmc_host *mmc = platform_get_drvdata(pdev);
1021 struct at91mci_host *host = mmc_priv(mmc);
1022 int ret = 0;
1023
1024 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
1025 enable_irq_wake(host->board->det_pin);
1026
1027 if (mmc)
1028 ret = mmc_suspend_host(mmc, state);
1029
1030 return ret;
1031 }
1032
1033 static int at91_mci_resume(struct platform_device *pdev)
1034 {
1035 struct mmc_host *mmc = platform_get_drvdata(pdev);
1036 struct at91mci_host *host = mmc_priv(mmc);
1037 int ret = 0;
1038
1039 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
1040 disable_irq_wake(host->board->det_pin);
1041
1042 if (mmc)
1043 ret = mmc_resume_host(mmc);
1044
1045 return ret;
1046 }
1047 #else
1048 #define at91_mci_suspend NULL
1049 #define at91_mci_resume NULL
1050 #endif
1051
1052 static struct platform_driver at91_mci_driver = {
1053 .remove = __exit_p(at91_mci_remove),
1054 .suspend = at91_mci_suspend,
1055 .resume = at91_mci_resume,
1056 .driver = {
1057 .name = DRIVER_NAME,
1058 .owner = THIS_MODULE,
1059 },
1060 };
1061
1062 static int __init at91_mci_init(void)
1063 {
1064 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
1065 }
1066
1067 static void __exit at91_mci_exit(void)
1068 {
1069 platform_driver_unregister(&at91_mci_driver);
1070 }
1071
1072 module_init(at91_mci_init);
1073 module_exit(at91_mci_exit);
1074
1075 MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
1076 MODULE_AUTHOR("Nick Randell");
1077 MODULE_LICENSE("GPL");