1/*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 *
11 * Warning!
12 *
13 * Changes to the FIFO system should be done with extreme care since
14 * the hardware is full of bugs related to the FIFO. Known issues are:
15 *
16 * - FIFO size field in FSR is always zero.
17 *
18 * - FIFO interrupts tend not to work as they should. Interrupts are
19 * triggered only for full/empty events, not for threshold values.
20 *
21 * - On APIC systems the FIFO empty interrupt is sometimes lost.
22 */
23
24#include <linux/config.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/device.h>
30#include <linux/interrupt.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/pnp.h>
34#include <linux/highmem.h>
35#include <linux/mmc/host.h>
36#include <linux/mmc/protocol.h>
37
38#include <asm/io.h>
39#include <asm/dma.h>
40#include <asm/scatterlist.h>
41
42#include "wbsd.h"
43
44#define DRIVER_NAME "wbsd"
45#define DRIVER_VERSION "1.4"
46
47#ifdef CONFIG_MMC_DEBUG
48#define DBG(x...) \
49 printk(KERN_DEBUG DRIVER_NAME ": " x)
50#define DBGF(f, x...) \
51 printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x)
52#else
53#define DBG(x...) do { } while (0)
54#define DBGF(x...) do { } while (0)
55#endif
56
57/*
58 * Device resources
59 */
60
61#ifdef CONFIG_PNP
62
63static const struct pnp_device_id pnp_dev_table[] = {
64 { "WEC0517", 0 },
65 { "WEC0518", 0 },
66 { "", 0 },
67};
68
69MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
70
71#endif /* CONFIG_PNP */
72
73static const int config_ports[] = { 0x2E, 0x4E };
74static const int unlock_codes[] = { 0x83, 0x87 };
75
76static const int valid_ids[] = {
77 0x7112,
78 };
79
80#ifdef CONFIG_PNP
81static unsigned int nopnp = 0;
82#else
83static const unsigned int nopnp = 1;
84#endif
85static unsigned int io = 0x248;
86static unsigned int irq = 6;
87static int dma = 2;
88
89/*
90 * Basic functions
91 */
92
93static inline void wbsd_unlock_config(struct wbsd_host* host)
94{
95 BUG_ON(host->config == 0);
96
97 outb(host->unlock_code, host->config);
98 outb(host->unlock_code, host->config);
99}
100
101static inline void wbsd_lock_config(struct wbsd_host* host)
102{
103 BUG_ON(host->config == 0);
104
105 outb(LOCK_CODE, host->config);
106}
107
108static inline void wbsd_write_config(struct wbsd_host* host, u8 reg, u8 value)
109{
110 BUG_ON(host->config == 0);
111
112 outb(reg, host->config);
113 outb(value, host->config + 1);
114}
115
116static inline u8 wbsd_read_config(struct wbsd_host* host, u8 reg)
117{
118 BUG_ON(host->config == 0);
119
120 outb(reg, host->config);
121 return inb(host->config + 1);
122}
123
124static inline void wbsd_write_index(struct wbsd_host* host, u8 index, u8 value)
125{
126 outb(index, host->base + WBSD_IDXR);
127 outb(value, host->base + WBSD_DATAR);
128}
129
130static inline u8 wbsd_read_index(struct wbsd_host* host, u8 index)
131{
132 outb(index, host->base + WBSD_IDXR);
133 return inb(host->base + WBSD_DATAR);
134}
135
136/*
137 * Common routines
138 */
139
140static void wbsd_init_device(struct wbsd_host* host)
141{
142 u8 setup, ier;
143
144 /*
145 * Reset chip (SD/MMC part) and fifo.
146 */
147 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
148 setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
149 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
150
151 /*
152 * Set DAT3 to input
153 */
154 setup &= ~WBSD_DAT3_H;
155 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
156 host->flags &= ~WBSD_FIGNORE_DETECT;
157
158 /*
159 * Read back default clock.
160 */
161 host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
162
163 /*
164 * Power down port.
165 */
166 outb(WBSD_POWER_N, host->base + WBSD_CSR);
167
168 /*
169 * Set maximum timeout.
170 */
171 wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
172
173 /*
174 * Test for card presence
175 */
176 if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
177 host->flags |= WBSD_FCARD_PRESENT;
178 else
179 host->flags &= ~WBSD_FCARD_PRESENT;
180
181 /*
182 * Enable interesting interrupts.
183 */
184 ier = 0;
185 ier |= WBSD_EINT_CARD;
186 ier |= WBSD_EINT_FIFO_THRE;
187 ier |= WBSD_EINT_CCRC;
188 ier |= WBSD_EINT_TIMEOUT;
189 ier |= WBSD_EINT_CRC;
190 ier |= WBSD_EINT_TC;
191
192 outb(ier, host->base + WBSD_EIR);
193
194 /*
195 * Clear interrupts.
196 */
197 inb(host->base + WBSD_ISR);
198}
199
200static void wbsd_reset(struct wbsd_host* host)
201{
202 u8 setup;
203
204 printk(KERN_ERR DRIVER_NAME ": Resetting chip\n");
205
206 /*
207 * Soft reset of chip (SD/MMC part).
208 */
209 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
210 setup |= WBSD_SOFT_RESET;
211 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
212}
213
214static void wbsd_request_end(struct wbsd_host* host, struct mmc_request* mrq)
215{
216 unsigned long dmaflags;
217
218 DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
219
220 if (host->dma >= 0)
221 {
222 /*
223 * Release ISA DMA controller.
224 */
225 dmaflags = claim_dma_lock();
226 disable_dma(host->dma);
227 clear_dma_ff(host->dma);
228 release_dma_lock(dmaflags);
229
230 /*
231 * Disable DMA on host.
232 */
233 wbsd_write_index(host, WBSD_IDX_DMA, 0);
234 }
235
236 host->mrq = NULL;
237
238 /*
239 * MMC layer might call back into the driver so first unlock.
240 */
241 spin_unlock(&host->lock);
242 mmc_request_done(host->mmc, mrq);
243 spin_lock(&host->lock);
244}
245
246/*
247 * Scatter/gather functions
248 */
249
250static inline void wbsd_init_sg(struct wbsd_host* host, struct mmc_data* data)
251{
252 /*
253 * Get info. about SG list from data structure.
254 */
255 host->cur_sg = data->sg;
256 host->num_sg = data->sg_len;
257
258 host->offset = 0;
259 host->remain = host->cur_sg->length;
260}
261
262static inline int wbsd_next_sg(struct wbsd_host* host)
263{
264 /*
265 * Skip to next SG entry.
266 */
267 host->cur_sg++;
268 host->num_sg--;
269
270 /*
271 * Any entries left?
272 */
273 if (host->num_sg > 0)
274 {
275 host->offset = 0;
276 host->remain = host->cur_sg->length;
277 }
278
279 return host->num_sg;
280}
281
282static inline char* wbsd_kmap_sg(struct wbsd_host* host)
283{
284 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
285 host->cur_sg->offset;
286 return host->mapped_sg;
287}
288
289static inline void wbsd_kunmap_sg(struct wbsd_host* host)
290{
291 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
292}
293
294static inline void wbsd_sg_to_dma(struct wbsd_host* host, struct mmc_data* data)
295{
296 unsigned int len, i, size;
297 struct scatterlist* sg;
298 char* dmabuf = host->dma_buffer;
299 char* sgbuf;
300
301 size = host->size;
302
303 sg = data->sg;
304 len = data->sg_len;
305
306 /*
307 * Just loop through all entries. Size might not
308 * be the entire list though so make sure that
309 * we do not transfer too much.
310 */
311 for (i = 0;i < len;i++)
312 {
313 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
314 if (size < sg[i].length)
315 memcpy(dmabuf, sgbuf, size);
316 else
317 memcpy(dmabuf, sgbuf, sg[i].length);
318 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
319 dmabuf += sg[i].length;
320
321 if (size < sg[i].length)
322 size = 0;
323 else
324 size -= sg[i].length;
325
326 if (size == 0)
327 break;
328 }
329
330 /*
331 * Check that we didn't get a request to transfer
332 * more data than can fit into the SG list.
333 */
334
335 BUG_ON(size != 0);
336
337 host->size -= size;
338}
339
340static inline void wbsd_dma_to_sg(struct wbsd_host* host, struct mmc_data* data)
341{
342 unsigned int len, i, size;
343 struct scatterlist* sg;
344 char* dmabuf = host->dma_buffer;
345 char* sgbuf;
346
347 size = host->size;
348
349 sg = data->sg;
350 len = data->sg_len;
351
352 /*
353 * Just loop through all entries. Size might not
354 * be the entire list though so make sure that
355 * we do not transfer too much.
356 */
357 for (i = 0;i < len;i++)
358 {
359 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
360 if (size < sg[i].length)
361 memcpy(sgbuf, dmabuf, size);
362 else
363 memcpy(sgbuf, dmabuf, sg[i].length);
364 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
365 dmabuf += sg[i].length;
366
367 if (size < sg[i].length)
368 size = 0;
369 else
370 size -= sg[i].length;
371
372 if (size == 0)
373 break;
374 }
375
376 /*
377 * Check that we didn't get a request to transfer
378 * more data than can fit into the SG list.
379 */
380
381 BUG_ON(size != 0);
382
383 host->size -= size;
384}
385
386/*
387 * Command handling
388 */
389
390static inline void wbsd_get_short_reply(struct wbsd_host* host,
391 struct mmc_command* cmd)
392{
393 /*
394 * Correct response type?
395 */
396 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT)
397 {
398 cmd->error = MMC_ERR_INVALID;
399 return;
400 }
401
402 cmd->resp[0] =
403 wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
404 cmd->resp[0] |=
405 wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
406 cmd->resp[0] |=
407 wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
408 cmd->resp[0] |=
409 wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
410 cmd->resp[1] =
411 wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
412}
413
414static inline void wbsd_get_long_reply(struct wbsd_host* host,
415 struct mmc_command* cmd)
416{
417 int i;
418
419 /*
420 * Correct response type?
421 */
422 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG)
423 {
424 cmd->error = MMC_ERR_INVALID;
425 return;
426 }
427
428 for (i = 0;i < 4;i++)
429 {
430 cmd->resp[i] =
431 wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
432 cmd->resp[i] |=
433 wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
434 cmd->resp[i] |=
435 wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
436 cmd->resp[i] |=
437 wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
438 }
439}
440
441static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
442{
443 int i;
444 u8 status, isr;
445
446 DBGF("Sending cmd (%x)\n", cmd->opcode);
447
448 /*
449 * Clear accumulated ISR. The interrupt routine
450 * will fill this one with events that occur during
451 * transfer.
452 */
453 host->isr = 0;
454
455 /*
456 * Send the command (CRC calculated by host).
457 */
458 outb(cmd->opcode, host->base + WBSD_CMDR);
459 for (i = 3;i >= 0;i--)
460 outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
461
462 cmd->error = MMC_ERR_NONE;
463
464 /*
465 * Wait for the request to complete.
466 */
467 do {
468 status = wbsd_read_index(host, WBSD_IDX_STATUS);
469 } while (status & WBSD_CARDTRAFFIC);
470
471 /*
472 * Do we expect a reply?
473 */
474 if ((cmd->flags & MMC_RSP_MASK) != MMC_RSP_NONE)
475 {
476 /*
477 * Read back status.
478 */
479 isr = host->isr;
480
481 /* Card removed? */
482 if (isr & WBSD_INT_CARD)
483 cmd->error = MMC_ERR_TIMEOUT;
484 /* Timeout? */
485 else if (isr & WBSD_INT_TIMEOUT)
486 cmd->error = MMC_ERR_TIMEOUT;
487 /* CRC? */
488 else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
489 cmd->error = MMC_ERR_BADCRC;
490 /* All ok */
491 else
492 {
493 if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_SHORT)
494 wbsd_get_short_reply(host, cmd);
495 else
496 wbsd_get_long_reply(host, cmd);
497 }
498 }
499
500 DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
501}
502
503/*
504 * Data functions
505 */
506
507static void wbsd_empty_fifo(struct wbsd_host* host)
508{
509 struct mmc_data* data = host->mrq->cmd->data;
510 char* buffer;
511 int i, fsr, fifo;
512
513 /*
514 * Handle excessive data.
515 */
516 if (data->bytes_xfered == host->size)
517 return;
518
519 buffer = wbsd_kmap_sg(host) + host->offset;
520
521 /*
522 * Drain the fifo. This has a tendency to loop longer
523 * than the FIFO length (usually one block).
524 */
525 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY))
526 {
527 /*
528 * The size field in the FSR is broken so we have to
529 * do some guessing.
530 */
531 if (fsr & WBSD_FIFO_FULL)
532 fifo = 16;
533 else if (fsr & WBSD_FIFO_FUTHRE)
534 fifo = 8;
535 else
536 fifo = 1;
537
538 for (i = 0;i < fifo;i++)
539 {
540 *buffer = inb(host->base + WBSD_DFR);
541 buffer++;
542 host->offset++;
543 host->remain--;
544
545 data->bytes_xfered++;
546
547 /*
548 * Transfer done?
549 */
550 if (data->bytes_xfered == host->size)
551 {
552 wbsd_kunmap_sg(host);
553 return;
554 }
555
556 /*
557 * End of scatter list entry?
558 */
559 if (host->remain == 0)
560 {
561 wbsd_kunmap_sg(host);
562
563 /*
564 * Get next entry. Check if last.
565 */
566 if (!wbsd_next_sg(host))
567 {
568 /*
569 * We should never reach this point.
570 * It means that we're trying to
571 * transfer more blocks than can fit
572 * into the scatter list.
573 */
574 BUG_ON(1);
575
576 host->size = data->bytes_xfered;
577
578 return;
579 }
580
581 buffer = wbsd_kmap_sg(host);
582 }
583 }
584 }
585
586 wbsd_kunmap_sg(host);
587
588 /*
589 * This is a very dirty hack to solve a
590 * hardware problem. The chip doesn't trigger
591 * FIFO threshold interrupts properly.
592 */
593 if ((host->size - data->bytes_xfered) < 16)
594 tasklet_schedule(&host->fifo_tasklet);
595}
596
597static void wbsd_fill_fifo(struct wbsd_host* host)
598{
599 struct mmc_data* data = host->mrq->cmd->data;
600 char* buffer;
601 int i, fsr, fifo;
602
603 /*
604 * Check that we aren't being called after the
605 * entire buffer has been transferred.
606 */
607 if (data->bytes_xfered == host->size)
608 return;
609
610 buffer = wbsd_kmap_sg(host) + host->offset;
611
612 /*
613 * Fill the fifo. This has a tendency to loop longer
614 * than the FIFO length (usually one block).
615 */
616 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL))
617 {
618 /*
619 * The size field in the FSR is broken so we have to
620 * do some guessing.
621 */
622 if (fsr & WBSD_FIFO_EMPTY)
623 fifo = 0;
624 else if (fsr & WBSD_FIFO_EMTHRE)
625 fifo = 8;
626 else
627 fifo = 15;
628
629 for (i = 16;i > fifo;i--)
630 {
631 outb(*buffer, host->base + WBSD_DFR);
632 buffer++;
633 host->offset++;
634 host->remain--;
635
636 data->bytes_xfered++;
637
638 /*
639 * Transfer done?
640 */
641 if (data->bytes_xfered == host->size)
642 {
643 wbsd_kunmap_sg(host);
644 return;
645 }
646
647 /*
648 * End of scatter list entry?
649 */
650 if (host->remain == 0)
651 {
652 wbsd_kunmap_sg(host);
653
654 /*
655 * Get next entry. Check if last.
656 */
657 if (!wbsd_next_sg(host))
658 {
659 /*
660 * We should never reach this point.
661 * It means that we're trying to
662 * transfer more blocks than can fit
663 * into the scatter list.
664 */
665 BUG_ON(1);
666
667 host->size = data->bytes_xfered;
668
669 return;
670 }
671
672 buffer = wbsd_kmap_sg(host);
673 }
674 }
675 }
676
677 wbsd_kunmap_sg(host);
678
679 /*
680 * The controller stops sending interrupts for
681 * 'FIFO empty' under certain conditions. So we
682 * need to be a bit more pro-active.
683 */
684 tasklet_schedule(&host->fifo_tasklet);
685}
686
687static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
688{
689 u16 blksize;
690 u8 setup;
691 unsigned long dmaflags;
692
693 DBGF("blksz %04x blks %04x flags %08x\n",
694 1 << data->blksz_bits, data->blocks, data->flags);
695 DBGF("tsac %d ms nsac %d clk\n",
696 data->timeout_ns / 1000000, data->timeout_clks);
697
698 /*
699 * Calculate size.
700 */
701 host->size = data->blocks << data->blksz_bits;
702
703 /*
704 * Check timeout values for overflow.
705 * (Yes, some cards cause this value to overflow).
706 */
707 if (data->timeout_ns > 127000000)
708 wbsd_write_index(host, WBSD_IDX_TAAC, 127);
709 else
710 wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns/1000000);
711
712 if (data->timeout_clks > 255)
713 wbsd_write_index(host, WBSD_IDX_NSAC, 255);
714 else
715 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
716
717 /*
718 * Inform the chip of how large blocks will be
719 * sent. It needs this to determine when to
720 * calculate CRC.
721 *
722 * Space for CRC must be included in the size.
723 * Two bytes are needed for each data line.
724 */
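 /*
  * The 12-bit block size is split across two index registers:
  * bits 7:0 go into PBSLSB and bits 11:8 into the upper nibble
  * of PBSMSB, which also carries the bus width selection bit
  * for 4-bit transfers.
  */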
725 if (host->bus_width == MMC_BUS_WIDTH_1)
726 {
727 blksize = (1 << data->blksz_bits) + 2;
728
729 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
730 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
731 }
732 else if (host->bus_width == MMC_BUS_WIDTH_4)
733 {
734 blksize = (1 << data->blksz_bits) + 2 * 4;
735
736 wbsd_write_index(host, WBSD_IDX_PBSMSB, ((blksize >> 4) & 0xF0)
737 | WBSD_DATA_WIDTH);
738 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
739 }
740 else
741 {
742 data->error = MMC_ERR_INVALID;
743 return;
744 }
745
746 /*
747 * Clear the FIFO. This is needed even for DMA
748 * transfers since the chip still uses the FIFO
749 * internally.
750 */
751 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
752 setup |= WBSD_FIFO_RESET;
753 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
754
755 /*
756 * DMA transfer?
757 */
758 if (host->dma >= 0)
759 {
760 /*
761 * The buffer for DMA is only 64 kB.
762 */
763 BUG_ON(host->size > 0x10000);
764 if (host->size > 0x10000)
765 {
766 data->error = MMC_ERR_INVALID;
767 return;
768 }
769
770 /*
771 * Transfer data from the SG list to
772 * the DMA buffer.
773 */
774 if (data->flags & MMC_DATA_WRITE)
775 wbsd_sg_to_dma(host, data);
776
777 /*
778 * Initialise the ISA DMA controller.
779 */
780 dmaflags = claim_dma_lock();
781 disable_dma(host->dma);
782 clear_dma_ff(host->dma);
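 /*
  * DMA_MODE_READ/WRITE select single-transfer mode on the 8237;
  * masking off bit 0x40 is presumably meant to put the channel
  * into demand mode instead.
  */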
783 if (data->flags & MMC_DATA_READ)
784 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
785 else
786 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
787 set_dma_addr(host->dma, host->dma_addr);
788 set_dma_count(host->dma, host->size);
789
790 enable_dma(host->dma);
791 release_dma_lock(dmaflags);
792
793 /*
794 * Enable DMA on the host.
795 */
796 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
797 }
798 else
799 {
800 /*
801 * This flag is used to keep printk
802 * output to a minimum.
803 */
804 host->firsterr = 1;
805
806 /*
807 * Initialise the SG list.
808 */
809 wbsd_init_sg(host, data);
810
811 /*
812 * Turn off DMA.
813 */
814 wbsd_write_index(host, WBSD_IDX_DMA, 0);
815
816 /*
817 * Set up FIFO threshold levels (and fill
818 * buffer if doing a write).
819 */
820 if (data->flags & MMC_DATA_READ)
821 {
822 wbsd_write_index(host, WBSD_IDX_FIFOEN,
823 WBSD_FIFOEN_FULL | 8);
824 }
825 else
826 {
827 wbsd_write_index(host, WBSD_IDX_FIFOEN,
828 WBSD_FIFOEN_EMPTY | 8);
829 wbsd_fill_fifo(host);
830 }
831 }
832
833 data->error = MMC_ERR_NONE;
834}
835
836static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
837{
838 unsigned long dmaflags;
839 int count;
840 u8 status;
841
842 WARN_ON(host->mrq == NULL);
843
844 /*
845 * Send a stop command if needed.
846 */
847 if (data->stop)
848 wbsd_send_command(host, data->stop);
849
850 /*
851 * Wait for the controller to leave data
852 * transfer state.
853 */
854 do
855 {
856 status = wbsd_read_index(host, WBSD_IDX_STATUS);
857 } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
858
859 /*
860 * DMA transfer?
861 */
862 if (host->dma >= 0)
863 {
864 /*
865 * Disable DMA on the host.
866 */
867 wbsd_write_index(host, WBSD_IDX_DMA, 0);
868
869 /*
870 * Turn off the ISA DMA controller.
871 */
872 dmaflags = claim_dma_lock();
873 disable_dma(host->dma);
874 clear_dma_ff(host->dma);
875 count = get_dma_residue(host->dma);
876 release_dma_lock(dmaflags);
877
878 /*
879 * Any leftover data?
880 */
881 if (count)
882 {
883 printk(KERN_ERR DRIVER_NAME ": Incomplete DMA "
884 "transfer. %d bytes left.\n", count);
885
886 data->error = MMC_ERR_FAILED;
887 }
888 else
889 {
890 /*
891 * Transfer data from DMA buffer to
892 * SG list.
893 */
894 if (data->flags & MMC_DATA_READ)
895 wbsd_dma_to_sg(host, data);
896
897 data->bytes_xfered = host->size;
898 }
899 }
900
901 DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
902
903 wbsd_request_end(host, host->mrq);
904}
905
906/*****************************************************************************\
907 * *
908 * MMC layer callbacks *
909 * *
910\*****************************************************************************/
911
912static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq)
913{
914 struct wbsd_host* host = mmc_priv(mmc);
915 struct mmc_command* cmd;
916
917 /*
918 * Disable tasklets to avoid a deadlock.
919 */
920 spin_lock_bh(&host->lock);
921
922 BUG_ON(host->mrq != NULL);
923
924 cmd = mrq->cmd;
925
926 host->mrq = mrq;
927
928 /*
929 * If there is no card in the slot then
930 * time out immediately.
931 */
932 if (!(host->flags & WBSD_FCARD_PRESENT))
933 {
934 cmd->error = MMC_ERR_TIMEOUT;
935 goto done;
936 }
937
938 /*
939 * Does the request include data?
940 */
941 if (cmd->data)
942 {
943 wbsd_prepare_data(host, cmd->data);
944
945 if (cmd->data->error != MMC_ERR_NONE)
946 goto done;
947 }
948
949 wbsd_send_command(host, cmd);
950
951 /*
952 * If this is a data transfer the request
953 * will be finished after the data has
954 * transfered.
955 */
956 if (cmd->data && (cmd->error == MMC_ERR_NONE))
957 {
958 /*
959 * Dirty fix for hardware bug.
960 */
961 if (host->dma == -1)
962 tasklet_schedule(&host->fifo_tasklet);
963
964 spin_unlock_bh(&host->lock);
965
966 return;
967 }
968
969done:
970 wbsd_request_end(host, mrq);
971
972 spin_unlock_bh(&host->lock);
973}
974
975static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
976{
977 struct wbsd_host* host = mmc_priv(mmc);
978 u8 clk, setup, pwr;
979
980 DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
981 ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
982 ios->vdd, ios->bus_width);
983
984 spin_lock_bh(&host->lock);
985
986 /*
987 * Reset the chip on each power off.
988 * Should clear out any weird states.
989 */
990 if (ios->power_mode == MMC_POWER_OFF)
991 wbsd_init_device(host);
992
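 /*
  * The controller only supports a few fixed clock rates; pick the
  * highest one that does not exceed the requested frequency and
  * fall back to 375 kHz otherwise.
  */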
993 if (ios->clock >= 24000000)
994 clk = WBSD_CLK_24M;
995 else if (ios->clock >= 16000000)
996 clk = WBSD_CLK_16M;
997 else if (ios->clock >= 12000000)
998 clk = WBSD_CLK_12M;
999 else
1000 clk = WBSD_CLK_375K;
1001
1002 /*
1003 * Only write to the clock register when
1004 * there is an actual change.
1005 */
1006 if (clk != host->clk)
1007 {
1008 wbsd_write_index(host, WBSD_IDX_CLK, clk);
1009 host->clk = clk;
1010 }
1011
1012 /*
1013 * Power up card.
1014 */
1015 if (ios->power_mode != MMC_POWER_OFF)
1016 {
1017 pwr = inb(host->base + WBSD_CSR);
1018 pwr &= ~WBSD_POWER_N;
1019 outb(pwr, host->base + WBSD_CSR);
1020 }
1021
1022 /*
1023 * MMC cards need to have pin 1 high during init.
1024 * It wreaks havoc with the card detection though so
1025 * that needs to be disabled.
1026 */
1027 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
1028 if (ios->chip_select == MMC_CS_HIGH)
1029 {
1030 BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
1031 setup |= WBSD_DAT3_H;
1032 host->flags |= WBSD_FIGNORE_DETECT;
1033 }
1034 else
1035 {
1036 setup &= ~WBSD_DAT3_H;
1037
1038 /*
1039 * We cannot resume card detection immediately
1040 * because of capacitance and delays in the chip.
1041 */
1042 mod_timer(&host->ignore_timer, jiffies + HZ/100);
1043 }
1044 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
1045
1046 /*
1047 * Store bus width for later. Will be used when
1048 * setting up the data transfer.
1049 */
1050 host->bus_width = ios->bus_width;
1051
1052 spin_unlock_bh(&host->lock);
1053}
1054
1055static int wbsd_get_ro(struct mmc_host* mmc)
1056{
1057 struct wbsd_host* host = mmc_priv(mmc);
1058 u8 csr;
1059
1060 spin_lock_bh(&host->lock);
1061
1062 csr = inb(host->base + WBSD_CSR);
1063 csr |= WBSD_MSLED;
1064 outb(csr, host->base + WBSD_CSR);
1065
1066 mdelay(1);
1067
1068 csr = inb(host->base + WBSD_CSR);
1069 csr &= ~WBSD_MSLED;
1070 outb(csr, host->base + WBSD_CSR);
1071
1072 spin_unlock_bh(&host->lock);
1073
1074 return csr & WBSD_WRPT;
1075}
1076
1077static struct mmc_host_ops wbsd_ops = {
1078 .request = wbsd_request,
1079 .set_ios = wbsd_set_ios,
1080 .get_ro = wbsd_get_ro,
1081};
1082
1083/*****************************************************************************\
1084 * *
1085 * Interrupt handling *
1086 * *
1087\*****************************************************************************/
1088
1089/*
1090 * Helper function to reset detection ignore
1091 */
1092
1093static void wbsd_reset_ignore(unsigned long data)
1094{
1095 struct wbsd_host *host = (struct wbsd_host*)data;
1096
1097 BUG_ON(host == NULL);
1098
1099 DBG("Resetting card detection ignore\n");
1100
1101 spin_lock_bh(&host->lock);
1102
1103 host->flags &= ~WBSD_FIGNORE_DETECT;
1104
1105 /*
1106 * Card status might have changed during the
1107 * blackout.
1108 */
1109 tasklet_schedule(&host->card_tasklet);
1110
1111 spin_unlock_bh(&host->lock);
1112}
1113
1114/*
1115 * Tasklets
1116 */
1117
1118static inline struct mmc_data* wbsd_get_data(struct wbsd_host* host)
1119{
1120 WARN_ON(!host->mrq);
1121 if (!host->mrq)
1122 return NULL;
1123
1124 WARN_ON(!host->mrq->cmd);
1125 if (!host->mrq->cmd)
1126 return NULL;
1127
1128 WARN_ON(!host->mrq->cmd->data);
1129 if (!host->mrq->cmd->data)
1130 return NULL;
1131
1132 return host->mrq->cmd->data;
1133}
1134
1135static void wbsd_tasklet_card(unsigned long param)
1136{
1137 struct wbsd_host* host = (struct wbsd_host*)param;
1138 u8 csr;
1139
1140 spin_lock(&host->lock);
1141
1142 if (host->flags & WBSD_FIGNORE_DETECT)
1143 {
1144 spin_unlock(&host->lock);
1145 return;
1146 }
1147
1148 csr = inb(host->base + WBSD_CSR);
1149 WARN_ON(csr == 0xff);
1150
1151 if (csr & WBSD_CARDPRESENT)
1152 {
1153 if (!(host->flags & WBSD_FCARD_PRESENT))
1154 {
1155 DBG("Card inserted\n");
1156 host->flags |= WBSD_FCARD_PRESENT;
1157
1158 spin_unlock(&host->lock);
1159
1160 /*
1161 * Delay card detection to allow electrical connections
1162 * to stabilise.
1163 */
1164 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
1165 }
1166 else
1167 spin_unlock(&host->lock);
1168 }
1169 else if (host->flags & WBSD_FCARD_PRESENT)
1170 {
1171 DBG("Card removed\n");
1172 host->flags &= ~WBSD_FCARD_PRESENT;
1173
1174 if (host->mrq)
1175 {
1176 printk(KERN_ERR DRIVER_NAME
1177 ": Card removed during transfer!\n");
1178 wbsd_reset(host);
1179
1180 host->mrq->cmd->error = MMC_ERR_FAILED;
1181 tasklet_schedule(&host->finish_tasklet);
1182 }
1183
1184 /*
1185 * Unlock first since we might get a call back.
1186 */
1187 spin_unlock(&host->lock);
1188
1189 mmc_detect_change(host->mmc, 0);
1190 }
1191 else
1192 spin_unlock(&host->lock);
1193}
1194
1195static void wbsd_tasklet_fifo(unsigned long param)
1196{
1197 struct wbsd_host* host = (struct wbsd_host*)param;
1198 struct mmc_data* data;
1199
1200 spin_lock(&host->lock);
1201
1202 if (!host->mrq)
1203 goto end;
1204
1205 data = wbsd_get_data(host);
1206 if (!data)
1207 goto end;
1208
1209 if (data->flags & MMC_DATA_WRITE)
1210 wbsd_fill_fifo(host);
1211 else
1212 wbsd_empty_fifo(host);
1213
1214 /*
1215 * Done?
1216 */
1217 if (host->size == data->bytes_xfered)
1218 {
1219 wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
1220 tasklet_schedule(&host->finish_tasklet);
1221 }
1222
1223end:
1224 spin_unlock(&host->lock);
1225}
1226
1227static void wbsd_tasklet_crc(unsigned long param)
1228{
1229 struct wbsd_host* host = (struct wbsd_host*)param;
1230 struct mmc_data* data;
1231
1232 spin_lock(&host->lock);
1233
1234 if (!host->mrq)
1235 goto end;
1236
1237 data = wbsd_get_data(host);
1238 if (!data)
1239 goto end;
1240
1241 DBGF("CRC error\n");
1242
1243 data->error = MMC_ERR_BADCRC;
1244
1245 tasklet_schedule(&host->finish_tasklet);
1246
1247end:
1248 spin_unlock(&host->lock);
1249}
1250
1251static void wbsd_tasklet_timeout(unsigned long param)
1252{
1253 struct wbsd_host* host = (struct wbsd_host*)param;
1254 struct mmc_data* data;
1255
1256 spin_lock(&host->lock);
1257
1258 if (!host->mrq)
1259 goto end;
1260
1261 data = wbsd_get_data(host);
1262 if (!data)
1263 goto end;
1264
1265 DBGF("Timeout\n");
1266
1267 data->error = MMC_ERR_TIMEOUT;
1268
1269 tasklet_schedule(&host->finish_tasklet);
1270
1271end:
1272 spin_unlock(&host->lock);
1273}
1274
1275static void wbsd_tasklet_finish(unsigned long param)
1276{
1277 struct wbsd_host* host = (struct wbsd_host*)param;
1278 struct mmc_data* data;
1279
1280 spin_lock(&host->lock);
1281
1282 WARN_ON(!host->mrq);
1283 if (!host->mrq)
1284 goto end;
1285
1286 data = wbsd_get_data(host);
1287 if (!data)
1288 goto end;
1289
1290 wbsd_finish_data(host, data);
1291
1292end:
1293 spin_unlock(&host->lock);
1294}
1295
1296static void wbsd_tasklet_block(unsigned long param)
1297{
1298 struct wbsd_host* host = (struct wbsd_host*)param;
1299 struct mmc_data* data;
1300
1301 spin_lock(&host->lock);
1302
1303 if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
1304 WBSD_CRC_OK)
1305 {
1306 data = wbsd_get_data(host);
1307 if (!data)
1308 goto end;
1309
1310 DBGF("CRC error\n");
1311
1312 data->error = MMC_ERR_BADCRC;
1313
1314 tasklet_schedule(&host->finish_tasklet);
1315 }
1316
1317end:
1318 spin_unlock(&host->lock);
1319}
1320
1321/*
1322 * Interrupt handling
1323 */
1324
1325static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
1326{
1327 struct wbsd_host* host = dev_id;
1328 int isr;
1329
1330 isr = inb(host->base + WBSD_ISR);
1331
1332 /*
1333 * Was it actually our hardware that caused the interrupt?
1334 */
1335 if (isr == 0xff || isr == 0x00)
1336 return IRQ_NONE;
1337
1338 host->isr |= isr;
1339
1340 /*
1341 * Schedule tasklets as needed.
1342 */
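 /*
  * Error events (CRC, timeout, busy end) use the high-priority
  * tasklet queue so that they are processed before any further
  * FIFO work.
  */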
1343 if (isr & WBSD_INT_CARD)
1344 tasklet_schedule(&host->card_tasklet);
1345 if (isr & WBSD_INT_FIFO_THRE)
1346 tasklet_schedule(&host->fifo_tasklet);
1347 if (isr & WBSD_INT_CRC)
1348 tasklet_hi_schedule(&host->crc_tasklet);
1349 if (isr & WBSD_INT_TIMEOUT)
1350 tasklet_hi_schedule(&host->timeout_tasklet);
1351 if (isr & WBSD_INT_BUSYEND)
1352 tasklet_hi_schedule(&host->block_tasklet);
1353 if (isr & WBSD_INT_TC)
1354 tasklet_schedule(&host->finish_tasklet);
1355
1356 return IRQ_HANDLED;
1357}
1358
1359/*****************************************************************************\
1360 * *
1361 * Device initialisation and shutdown *
1362 * *
1363\*****************************************************************************/
1364
1365/*
1366 * Allocate/free MMC structure.
1367 */
1368
1369static int __devinit wbsd_alloc_mmc(struct device* dev)
1370{
1371 struct mmc_host* mmc;
1372 struct wbsd_host* host;
1373
1374 /*
1375 * Allocate MMC structure.
1376 */
1377 mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
1378 if (!mmc)
1379 return -ENOMEM;
1380
1381 host = mmc_priv(mmc);
1382 host->mmc = mmc;
1383
1384 host->dma = -1;
1385
1386 /*
1387 * Set host parameters.
1388 */
1389 mmc->ops = &wbsd_ops;
1390 mmc->f_min = 375000;
1391 mmc->f_max = 24000000;
1392 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1393 mmc->caps = MMC_CAP_4_BIT_DATA;
1394
1395 spin_lock_init(&host->lock);
1396
1397 /*
1398 * Set up timers
1399 */
1400 init_timer(&host->ignore_timer);
1401 host->ignore_timer.data = (unsigned long)host;
1402 host->ignore_timer.function = wbsd_reset_ignore;
1403
1404 /*
1405 * Maximum number of segments. Worst case is one sector per segment
1406 * so this will be 64kB/512.
1407 */
1408 mmc->max_hw_segs = 128;
1409 mmc->max_phys_segs = 128;
1410
1411 /*
1412 * Maximum number of sectors in one transfer. Also limited by 64kB
1413 * buffer.
1414 */
1415 mmc->max_sectors = 128;
1416
1417 /*
1418 * Maximum segment size. Could be one segment with the maximum number
1419 * of segments.
1420 */
1421 mmc->max_seg_size = mmc->max_sectors * 512;
1422
1423 dev_set_drvdata(dev, mmc);
1424
1425 return 0;
1426}
1427
1428static void __devexit wbsd_free_mmc(struct device* dev)
1429{
1430 struct mmc_host* mmc;
1431 struct wbsd_host* host;
1432
1433 mmc = dev_get_drvdata(dev);
1434 if (!mmc)
1435 return;
1436
1437 host = mmc_priv(mmc);
1438 BUG_ON(host == NULL);
1439
1440 del_timer_sync(&host->ignore_timer);
1441
1442 mmc_free_host(mmc);
1443
1444 dev_set_drvdata(dev, NULL);
1445}
1446
1447/*
1448 * Scan for known chip IDs
1449 */
1450
1451static int __devinit wbsd_scan(struct wbsd_host* host)
1452{
1453 int i, j, k;
1454 int id;
1455
1456 /*
1457 * Iterate through all ports, all codes to
1458 * find hardware that is in our known list.
1459 */
1460 for (i = 0;i < sizeof(config_ports)/sizeof(int);i++)
1461 {
1462 if (!request_region(config_ports[i], 2, DRIVER_NAME))
1463 continue;
1464
1465 for (j = 0;j < sizeof(unlock_codes)/sizeof(int);j++)
1466 {
1467 id = 0xFFFF;
1468
1469 outb(unlock_codes[j], config_ports[i]);
1470 outb(unlock_codes[j], config_ports[i]);
1471
1472 outb(WBSD_CONF_ID_HI, config_ports[i]);
1473 id = inb(config_ports[i] + 1) << 8;
1474
1475 outb(WBSD_CONF_ID_LO, config_ports[i]);
1476 id |= inb(config_ports[i] + 1);
1477
1478 for (k = 0;k < sizeof(valid_ids)/sizeof(int);k++)
1479 {
1480 if (id == valid_ids[k])
1481 {
1482 host->chip_id = id;
1483 host->config = config_ports[i];
1484 host->unlock_code = unlock_codes[j];
1485
1486 return 0;
1487 }
1488 }
1489
1490 if (id != 0xFFFF)
1491 {
1492 DBG("Unknown hardware (id %x) found at %x\n",
1493 id, config_ports[i]);
1494 }
1495
1496 outb(LOCK_CODE, config_ports[i]);
1497 }
1498
1499 release_region(config_ports[i], 2);
1500 }
1501
1502 return -ENODEV;
1503}
1504
1505/*
1506 * Allocate/free io port ranges
1507 */
1508
1509static int __devinit wbsd_request_region(struct wbsd_host* host, int base)
1510{
1511 if (base & 0x7)
1512 return -EINVAL;
1513
1514 if (!request_region(base, 8, DRIVER_NAME))
1515 return -EIO;
1516
1517 host->base = base;
1518
1519 return 0;
1520}
1521
1522static void __devexit wbsd_release_regions(struct wbsd_host* host)
1523{
1524 if (host->base)
1525 release_region(host->base, 8);
1526
1527 host->base = 0;
1528
1529 if (host->config)
1530 release_region(host->config, 2);
1531
1532 host->config = 0;
1533}
1534
1535/*
1536 * Allocate/free DMA port and buffer
1537 */
1538
1539static void __devinit wbsd_request_dma(struct wbsd_host* host, int dma)
1540{
1541 if (dma < 0)
1542 return;
1543
1544 if (request_dma(dma, DRIVER_NAME))
1545 goto err;
1546
1547 /*
1548 * We need to allocate a special buffer in
1549 * order for ISA to be able to DMA to it.
1550 */
1551 host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
1552 GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
1553 if (!host->dma_buffer)
1554 goto free;
1555
1556 /*
1557 * Translate the address to a physical address.
1558 */
1559 host->dma_addr = dma_map_single(host->mmc->dev, host->dma_buffer,
1560 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1561
1562 /*
1563 * ISA DMA must be aligned on a 64k basis.
1564 */
1565 if ((host->dma_addr & 0xffff) != 0)
1566 goto kfree;
1567 /*
1568 * ISA cannot access memory above 16 MB.
1569 */
1570 else if (host->dma_addr >= 0x1000000)
1571 goto kfree;
1572
1573 host->dma = dma;
1574
1575 return;
1576
1577kfree:
1578 /*
1579 * If we've gotten here then there is some kind of alignment bug
1580 */
1581 BUG_ON(1);
1582
1583 dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE,
1584 DMA_BIDIRECTIONAL);
1585 host->dma_addr = (dma_addr_t)NULL;
1586
1587 kfree(host->dma_buffer);
1588 host->dma_buffer = NULL;
1589
1590free:
1591 free_dma(dma);
1592
1593err:
1594 printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
1595 "Falling back on FIFO.\n", dma);
1596}
1597
1598static void __devexit wbsd_release_dma(struct wbsd_host* host)
1599{
1600 if (host->dma_addr)
1601 dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE,
1602 DMA_BIDIRECTIONAL);
1603 if (host->dma_buffer)
1604 kfree(host->dma_buffer);
1605 if (host->dma >= 0)
1606 free_dma(host->dma);
1607
1608 host->dma = -1;
1609 host->dma_buffer = NULL;
1610 host->dma_addr = (dma_addr_t)NULL;
1611}
1612
1613/*
1614 * Allocate/free IRQ.
1615 */
1616
1617static int __devinit wbsd_request_irq(struct wbsd_host* host, int irq)
1618{
1619 int ret;
1620
1621 /*
1622 * Allocate interrupt.
1623 */
1624
1625 ret = request_irq(irq, wbsd_irq, SA_SHIRQ, DRIVER_NAME, host);
1626 if (ret)
1627 return ret;
1628
1629 host->irq = irq;
1630
1631 /*
1632 * Set up tasklets.
1633 */
1634 tasklet_init(&host->card_tasklet, wbsd_tasklet_card, (unsigned long)host);
1635 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, (unsigned long)host);
1636 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, (unsigned long)host);
1637 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, (unsigned long)host);
1638 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, (unsigned long)host);
1639 tasklet_init(&host->block_tasklet, wbsd_tasklet_block, (unsigned long)host);
1640
1641 return 0;
1642}
1643
1644static void __devexit wbsd_release_irq(struct wbsd_host* host)
1645{
1646 if (!host->irq)
1647 return;
1648
1649 free_irq(host->irq, host);
1650
1651 host->irq = 0;
1652
1653 tasklet_kill(&host->card_tasklet);
1654 tasklet_kill(&host->fifo_tasklet);
1655 tasklet_kill(&host->crc_tasklet);
1656 tasklet_kill(&host->timeout_tasklet);
1657 tasklet_kill(&host->finish_tasklet);
1658 tasklet_kill(&host->block_tasklet);
1659}
1660
1661/*
1662 * Allocate all resources for the host.
1663 */
1664
1665static int __devinit wbsd_request_resources(struct wbsd_host* host,
1666 int base, int irq, int dma)
1667{
1668 int ret;
1669
1670 /*
1671 * Allocate I/O ports.
1672 */
1673 ret = wbsd_request_region(host, base);
1da177e4 1674 if (ret)
1675 return ret;
1676
1677 /*
1678 * Allocate interrupt.
1679 */
1680 ret = wbsd_request_irq(host, irq);
1681 if (ret)
1682 return ret;
1683
1684 /*
1685 * Allocate DMA.
1686 */
1687 wbsd_request_dma(host, dma);
1688
1689 return 0;
1690}
1691
1692/*
1693 * Release all resources for the host.
1694 */
1695
1696static void __devexit wbsd_release_resources(struct wbsd_host* host)
1697{
1698 wbsd_release_dma(host);
1699 wbsd_release_irq(host);
1700 wbsd_release_regions(host);
1701}
1702
1703/*
1704 * Configure the resources the chip should use.
1705 */
1706
1707static void __devinit wbsd_chip_config(struct wbsd_host* host)
1708{
1709 /*
1710 * Reset the chip.
1711 */
1712 wbsd_write_config(host, WBSD_CONF_SWRST, 1);
1713 wbsd_write_config(host, WBSD_CONF_SWRST, 0);
1714
1715 /*
1716 * Select SD/MMC function.
1717 */
1718 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1719
1720 /*
1721 * Set up card detection.
1722 */
1723 wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);
1724
1725 /*
1726 * Configure chip
1727 */
1728 wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
1729 wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
1730
1731 wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
1732
1733 if (host->dma >= 0)
1734 wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
1735
1736 /*
1737 * Enable and power up chip.
1738 */
1739 wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
1740 wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
1741}
1742
1743/*
1744 * Check that configured resources are correct.
1745 */
1746
1747static int __devinit wbsd_chip_validate(struct wbsd_host* host)
1748{
1749 int base, irq, dma;
1750
1751 /*
1752 * Select SD/MMC function.
1753 */
1754 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1755
1756 /*
1757 * Read configuration.
1758 */
1759 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
1760 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
1761
1762 irq = wbsd_read_config(host, WBSD_CONF_IRQ);
1763
1764 dma = wbsd_read_config(host, WBSD_CONF_DRQ);
1765
1766 /*
1767 * Validate against given configuration.
1768 */
1769 if (base != host->base)
1770 return 0;
1771 if (irq != host->irq)
1772 return 0;
1773 if ((dma != host->dma) && (host->dma != -1))
1774 return 0;
1775
1776 return 1;
1777}
1778
1779/*****************************************************************************\
1780 * *
1781 * Devices setup and shutdown *
1782 * *
1783\*****************************************************************************/
1784
1785static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
1786 int pnp)
1787{
1788 struct wbsd_host* host = NULL;
1789 struct mmc_host* mmc = NULL;
1790 int ret;
1791
1792 ret = wbsd_alloc_mmc(dev);
1793 if (ret)
1794 return ret;
1795
1796 mmc = dev_get_drvdata(dev);
1797 host = mmc_priv(mmc);
1798
1799 /*
1800 * Scan for hardware.
1801 */
1802 ret = wbsd_scan(host);
1803 if (ret)
1804 {
1805 if (pnp && (ret == -ENODEV))
1806 {
1807 printk(KERN_WARNING DRIVER_NAME
1808 ": Unable to confirm device presence. You may "
1809 "experience lock-ups.\n");
1810 }
1811 else
1812 {
1813 wbsd_free_mmc(dev);
1814 return ret;
1815 }
1816 }
1817
1818 /*
1819 * Request resources.
1820 */
1821 ret = wbsd_request_resources(host, base, irq, dma);
1822 if (ret)
1823 {
1824 wbsd_release_resources(host);
1825 wbsd_free_mmc(dev);
1826 return ret;
1827 }
1828
1829 /*
1830 * See if chip needs to be configured.
1831 */
1832 if (pnp && (host->config != 0))
1833 {
1834 if (!wbsd_chip_validate(host))
1835 {
1836 printk(KERN_WARNING DRIVER_NAME
1837 ": PnP active but chip not configured! "
1838 "You probably have a buggy BIOS. "
1839 "Configuring chip manually.\n");
1840 wbsd_chip_config(host);
1841 }
1842 }
1843 else
1844 wbsd_chip_config(host);
1845
1846 /*
1847 * Power Management stuff. No idea how this works.
1848 * Not tested.
1849 */
1850#ifdef CONFIG_PM
1851 if (host->config)
1852 wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
1853#endif
1854 /*
1855 * Allow device to initialise itself properly.
1856 */
1857 mdelay(5);
1858
1859 /*
1860 * Reset the chip into a known state.
1861 */
1862 wbsd_init_device(host);
1863
1864 mmc_add_host(mmc);
1865
1866 printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
1867 if (host->chip_id != 0)
1868 printk(" id %x", (int)host->chip_id);
1869 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
1870 if (host->dma >= 0)
1871 printk(" dma %d", (int)host->dma);
1872 else
1873 printk(" FIFO");
1874 if (pnp)
1875 printk(" PnP");
1876 printk("\n");
1877
1878 return 0;
1879}
1880
1881static void __devexit wbsd_shutdown(struct device* dev, int pnp)
1882{
1883 struct mmc_host* mmc = dev_get_drvdata(dev);
1884 struct wbsd_host* host;
1885
1886 if (!mmc)
1887 return;
1888
1889 host = mmc_priv(mmc);
1890
1891 mmc_remove_host(mmc);
1892
1893 if (!pnp)
1894 {
1895 /*
1896 * Power down the SD/MMC function.
1897 */
1898 wbsd_unlock_config(host);
1899 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1900 wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
1901 wbsd_lock_config(host);
1902 }
1903
1904 wbsd_release_resources(host);
1905
1906 wbsd_free_mmc(dev);
1907}
1908
1909/*
1910 * Non-PnP
1911 */
1912
1913static int __devinit wbsd_probe(struct device* dev)
1914{
1915 return wbsd_init(dev, io, irq, dma, 0);
1916}
1917
1918static int __devexit wbsd_remove(struct device* dev)
1919{
1920 wbsd_shutdown(dev, 0);
1921
1922 return 0;
1923}
1924
1925/*
1926 * PnP
1927 */
1928
1929#ifdef CONFIG_PNP
1930
1931static int __devinit
1932wbsd_pnp_probe(struct pnp_dev * pnpdev, const struct pnp_device_id *dev_id)
1933{
1934 int io, irq, dma;
1935
1936 /*
1937 * Get resources from PnP layer.
1938 */
1939 io = pnp_port_start(pnpdev, 0);
1940 irq = pnp_irq(pnpdev, 0);
1941 if (pnp_dma_valid(pnpdev, 0))
1942 dma = pnp_dma(pnpdev, 0);
1943 else
1944 dma = -1;
1945
1946 DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
1947
1948 return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
1949}
1950
1951static void __devexit wbsd_pnp_remove(struct pnp_dev * dev)
1952{
1953 wbsd_shutdown(&dev->dev, 1);
1954}
1955
1956#endif /* CONFIG_PNP */
1957
1958/*
1959 * Power management
1960 */
1961
1962#ifdef CONFIG_PM
1963static int wbsd_suspend(struct device *dev, pm_message_t state, u32 level)
1964{
1965 DBGF("Not yet supported\n");
1966
1967 return 0;
1968}
1969
1970static int wbsd_resume(struct device *dev, u32 level)
1971{
1972 DBGF("Not yet supported\n");
1973
1974 return 0;
1975}
1976#else
1977#define wbsd_suspend NULL
1978#define wbsd_resume NULL
1979#endif
1980
1981static struct platform_device *wbsd_device;
1982
1983static struct device_driver wbsd_driver = {
1984 .name = DRIVER_NAME,
1985 .bus = &platform_bus_type,
1986 .probe = wbsd_probe,
1987 .remove = wbsd_remove,
1988
1989 .suspend = wbsd_suspend,
1990 .resume = wbsd_resume,
1991};
1992
1993#ifdef CONFIG_PNP
1994
1995static struct pnp_driver wbsd_pnp_driver = {
1996 .name = DRIVER_NAME,
1997 .id_table = pnp_dev_table,
1998 .probe = wbsd_pnp_probe,
1999 .remove = wbsd_pnp_remove,
2000};
2001
2002#endif /* CONFIG_PNP */
2003
2004/*
2005 * Module loading/unloading
2006 */
2007
2008static int __init wbsd_drv_init(void)
2009{
2010 int result;
2011
2012 printk(KERN_INFO DRIVER_NAME
2013 ": Winbond W83L51xD SD/MMC card interface driver, "
2014 DRIVER_VERSION "\n");
2015 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
2016
2017#ifdef CONFIG_PNP
2018
2019 if (!nopnp)
2020 {
2021 result = pnp_register_driver(&wbsd_pnp_driver);
2022 if (result < 0)
2023 return result;
2024 }
2025
2026#endif /* CONFIG_PNP */
2027
2028 if (nopnp)
2029 {
2030 result = driver_register(&wbsd_driver);
2031 if (result < 0)
2032 return result;
2033
2034 wbsd_device = platform_device_register_simple(DRIVER_NAME, -1,
2035 NULL, 0);
2036 if (IS_ERR(wbsd_device))
2037 return PTR_ERR(wbsd_device);
2038 }
2039
2040 return 0;
2041}
2042
2043static void __exit wbsd_drv_exit(void)
2044{
2045#ifdef CONFIG_PNP
2046
2047 if (!nopnp)
2048 pnp_unregister_driver(&wbsd_pnp_driver);
2049
2050#endif /* CONFIG_PNP */
2051
2052 if (nopnp)
2053 {
2054 platform_device_unregister(wbsd_device);
2055
2056 driver_unregister(&wbsd_driver);
2057 }
2058
2059 DBG("unloaded\n");
2060}
2061
2062module_init(wbsd_drv_init);
2063module_exit(wbsd_drv_exit);
2064#ifdef CONFIG_PNP
2065module_param(nopnp, uint, 0444);
2066#endif
2067module_param(io, uint, 0444);
2068module_param(irq, uint, 0444);
2069module_param(dma, int, 0444);
2070
2071MODULE_LICENSE("GPL");
2072MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
2073MODULE_VERSION(DRIVER_VERSION);
2074
2075#ifdef CONFIG_PNP
2076MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
2077#endif
2078MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
2079MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
2080MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");