/*
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 *       IDE Interface and Linux Device Driver" Application Note.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>

#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME "au1200-ide"
#define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"

/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE 1

static _auide_hwif auide_hwif;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)

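/*
 * PIO-over-DBDMA data-in: queue the destination buffer on the RX DBDMA
 * channel and busy-wait until the current descriptor is no longer valid,
 * i.e. until 'count' halfwords have arrived from the drive.
 */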
void auide_insw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
                           DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->rx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

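/*
 * PIO-over-DBDMA data-out: queue the source buffer on the TX DBDMA
 * channel and busy-wait until the descriptor has been consumed.
 */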
void auide_outsw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if(!put_source_flags(ahwif->tx_chan, (void*)addr,
                             count << 1, DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->tx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

#endif

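/*
 * Program the static bus timing (MEM_STTIME2) and configuration
 * (MEM_STCFG2) registers for the requested PIO mode on RCS2#.
 */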
static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        /* set pio mode! */
        switch(pio) {
        case 0:
                mem_sttime = SBC_IDE_TIMING(PIO0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
                break;

        case 1:
                mem_sttime = SBC_IDE_TIMING(PIO1);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
                break;

        case 2:
                mem_sttime = SBC_IDE_TIMING(PIO2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
                break;

        case 3:
                mem_sttime = SBC_IDE_TIMING(PIO3);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;

                break;

        case 4:
                mem_sttime = SBC_IDE_TIMING(PIO4);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
                break;
        }

        au_writel(mem_sttime,MEM_STTIME2);
        au_writel(mem_stcfg,MEM_STCFG2);
}

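/*
 * Program the static bus timing and configuration registers for the
 * requested Multi-Word DMA mode (MWDMA0-2 only, and only when the
 * MDMA2 DBDMA support is configured in).
 */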
static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        switch(speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        case XFER_MW_DMA_2:
                mem_sttime = SBC_IDE_TIMING(MDMA2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;

                break;
        case XFER_MW_DMA_1:
                mem_sttime = SBC_IDE_TIMING(MDMA1);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;

                break;
        case XFER_MW_DMA_0:
                mem_sttime = SBC_IDE_TIMING(MDMA0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;

                break;
#endif
        }

        au_writel(mem_sttime,MEM_STTIME2);
        au_writel(mem_stcfg,MEM_STCFG2);
}

/*
 * Multi-Word DMA + DbDMA functions
 */

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
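/*
 * Translate the request's scatter-gather list into DBDMA descriptors.
 * Each segment is split into chunks of at most 0xfe00 bytes; only the
 * descriptors of the last segment have their completion interrupt
 * enabled.  Returns 1 on success and 0 if the request should fall back
 * to PIO.
 */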
static int auide_build_dmatable(ide_drive_t *drive)
{
        int i, iswrite, count = 0;
        ide_hwif_t *hwif = HWIF(drive);

        struct request *rq = HWGROUP(drive)->rq;

        _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
        struct scatterlist *sg;

        iswrite = (rq_data_dir(rq) == WRITE);
        /* Save for interrupt context */
        ahwif->drive = drive;

        hwif->sg_nents = i = ide_build_sglist(drive, rq);

        if (!i)
                return 0;

        /* fill the descriptors */
        sg = hwif->sg_table;
        while (i && sg_dma_len(sg)) {
                u32 cur_addr;
                u32 cur_len;

                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                while (cur_len) {
                        u32 flags = DDMA_FLAGS_NOIE;
                        unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;

                        if (++count >= PRD_ENTRIES) {
                                printk(KERN_WARNING "%s: DMA table too small\n",
                                       drive->name);
                                goto use_pio_instead;
                        }

                        /* Let's enable intr for the last descriptor only */
                        if (1==i)
                                flags = DDMA_FLAGS_IE;
                        else
                                flags = DDMA_FLAGS_NOIE;

                        if (iswrite) {
                                if(!put_source_flags(ahwif->tx_chan,
                                                     (void*) sg_virt(sg),
                                                     tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __func__, __LINE__);
                                }
                        } else
                        {
                                if(!put_dest_flags(ahwif->rx_chan,
                                                   (void*) sg_virt(sg),
                                                   tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __func__, __LINE__);
                                }
                        }

                        cur_addr += tc;
                        cur_len -= tc;
                }
                sg = sg_next(sg);
                i--;
        }

        if (count)
                return 1;

 use_pio_instead:
        ide_destroy_dmatable(drive);

        return 0; /* revert to PIO for this request */
}

static int auide_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        if (hwif->sg_nents) {
                ide_destroy_dmatable(drive);
                hwif->sg_nents = 0;
        }

        return 0;
}

static void auide_dma_start(ide_drive_t *drive )
{
}


static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr,
                            (2*WAIT_CMD), NULL);
}

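/*
 * Prepare a DMA transfer: build the descriptor list and flag the drive
 * as waiting for DMA.  If the table cannot be built, remap the sg list
 * and return 1 so the core falls back to PIO for this request.
 */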
static int auide_dma_setup(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;

        if (!auide_build_dmatable(drive)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        drive->waiting_for_dma = 1;
        return 0;
}

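/*
 * Polled completion check: the DBDMA completion callbacks clear
 * waiting_for_dma, which is also used here as a poll counter; after
 * DMA_WAIT_TIMEOUT polls a warning is printed and 1 is returned.
 */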
static int auide_dma_test_irq(ide_drive_t *drive)
{
        if (drive->waiting_for_dma == 0)
                printk(KERN_WARNING "%s: ide_dma_test_irq \
                                called while not waiting\n", drive->name);

        /* If dbdma didn't execute the STOP command yet, the
         * active bit is still set
         */
        drive->waiting_for_dma++;
        if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
                printk(KERN_WARNING "%s: timeout waiting for ddma to \
                                complete\n", drive->name);
                return 1;
        }
        udelay(10);
        return 0;
}

static void auide_dma_host_set(ide_drive_t *drive, int on)
{
}

static void auide_dma_lost_irq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}

static void auide_ddma_tx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif*)param;
        ahwif->drive->waiting_for_dma = 0;
}

static void auide_ddma_rx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif*)param;
        ahwif->drive->waiting_for_dma = 0;
}

#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */

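/*
 * Fill in a dbdev_tab_t describing one end of a DBDMA transfer: the
 * IDE interface at IDE_PHYS_ADDR with the given device id, transfer
 * size, device width and flags.
 */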
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags)
{
        dev->dev_id          = dev_id;
        dev->dev_physaddr    = (u32)IDE_PHYS_ADDR;
        dev->dev_intlevel    = 0;
        dev->dev_intpolarity = 0;
        dev->dev_tsize       = tsize;
        dev->dev_devwidth    = devwidth;
        dev->dev_flags       = flags;
}

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
static void auide_dma_timeout(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

        if (auide_dma_test_irq(drive))
                return;

        auide_dma_end(drive);
}

static const struct ide_dma_ops au1xxx_dma_ops = {
        .dma_host_set           = auide_dma_host_set,
        .dma_setup              = auide_dma_setup,
        .dma_exec_cmd           = auide_dma_exec_cmd,
        .dma_start              = auide_dma_start,
        .dma_end                = auide_dma_end,
        .dma_test_irq           = auide_dma_test_irq,
        .dma_lost_irq           = auide_dma_lost_irq,
        .dma_timeout            = auide_dma_timeout,
};

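/*
 * MWDMA setup: register TX/RX DBDMA devices for the IDE interface plus
 * an "always" target device, allocate the two channels and their
 * descriptor rings, and a coherent buffer for the PRD table.
 */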
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
        dbdev_tab_t source_dev_tab, target_dev_tab;
        u32 dev_id, tsize, devwidth, flags;

        dev_id = IDE_DDMA_REQ;

        tsize = 8; /*  1 */
        devwidth = 32; /* 16 */

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev( &source_dev_tab,
                              dev_id,
                              tsize, devwidth, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

        auide_init_dbdma_dev( &source_dev_tab,
                              dev_id,
                              tsize, devwidth, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

        /* We also need to add a target device for the DMA */
        auide_init_dbdma_dev( &target_dev_tab,
                              (u32)DSCR_CMD0_ALWAYS,
                              tsize, devwidth, DEV_FLAGS_ANYUSE);
        auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
                                                 auide->tx_dev_id,
                                                 auide_ddma_tx_callback,
                                                 (void*)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 auide->target_dev_id,
                                                 auide_ddma_rx_callback,
                                                 (void*)auide);

        auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                             NUM_DESCRIPTORS);
        auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                             NUM_DESCRIPTORS);

        hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
                                                PRD_ENTRIES * PRD_BYTES,        /* 1 Page */
                                                &hwif->dmatable_dma, GFP_KERNEL);

        au1xxx_dbdma_start( auide->tx_chan );
        au1xxx_dbdma_start( auide->rx_chan );

        return 0;
}
#else
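/*
 * PIO+DBDMA setup: both channels are tied to the "always" pseudo-device
 * and no completion callbacks are installed, since auide_insw() and
 * auide_outsw() poll the descriptors directly.
 */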
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
        dbdev_tab_t source_dev_tab;
        int flags;

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev( &source_dev_tab,
                              (u32)DSCR_CMD0_ALWAYS,
                              8, 32, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

        auide_init_dbdma_dev( &source_dev_tab,
                              (u32)DSCR_CMD0_ALWAYS,
                              8, 32, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
                                                 auide->tx_dev_id,
                                                 NULL,
                                                 (void*)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 DSCR_CMD0_ALWAYS,
                                                 NULL,
                                                 (void*)auide);

        auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                             NUM_DESCRIPTORS);
        auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                             NUM_DESCRIPTORS);

        au1xxx_dbdma_start( auide->tx_chan );
        au1xxx_dbdma_start( auide->rx_chan );

        return 0;
}
#endif

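/*
 * Populate the hw_regs_t I/O port array from the memory-mapped register
 * base: the eight task-file registers are spaced by IDE_REG_SHIFT, and
 * offset 14 holds the Alternate Status/Device Control register.
 */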
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
        int i;
        unsigned long *ata_regs = hw->io_ports_array;

        /* FIXME? */
        for (i = 0; i < 8; i++)
                *ata_regs++ = ahwif->regbase + (i << IDE_REG_SHIFT);

        /* set the Alternative Status register */
        *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT);
}

static const struct ide_port_ops au1xxx_port_ops = {
        .set_pio_mode           = au1xxx_set_pio_mode,
        .set_dma_mode           = auide_set_dma_mode,
};

static const struct ide_port_info au1xxx_port_info = {
        .init_dma               = auide_ddma_init,
        .port_ops               = &au1xxx_port_ops,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        .dma_ops                = &au1xxx_dma_ops,
#endif
        .host_flags             = IDE_HFLAG_POST_SET_MODE |
                                  IDE_HFLAG_NO_IO_32BIT |
                                  IDE_HFLAG_UNMASK_IRQS,
        .pio_mask               = ATA_PIO4,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        .mwdma_mask             = ATA_MWDMA2,
#endif
};

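/*
 * Platform bus probe: claim and map the register window, describe the
 * port to the IDE core, hook up the optional PIO-over-DBDMA transfer
 * routines and register the interface.
 */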
static int au_ide_probe(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        _auide_hwif *ahwif = &auide_hwif;
        ide_hwif_t *hwif;
        struct resource *res;
        int ret = 0;
        u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
        hw_regs_t hw;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
        char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
        char *mode = "PIO+DDMA(offload)";
#endif

        memset(&auide_hwif, 0, sizeof(_auide_hwif));
        ahwif->irq = platform_get_irq(pdev, 0);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (res == NULL) {
                pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }
        if (ahwif->irq < 0) {
                pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }

        if (!request_mem_region(res->start, res->end - res->start + 1,
                                pdev->name)) {
                pr_debug("%s: request_mem_region failed\n", DRV_NAME);
                ret = -EBUSY;
                goto out;
        }

        ahwif->regbase = (u32)ioremap(res->start, res->end - res->start + 1);
        if (ahwif->regbase == 0) {
                ret = -ENOMEM;
                goto out;
        }

        hwif = ide_find_port();
        if (hwif == NULL) {
                ret = -ENOENT;
                goto out;
        }

        memset(&hw, 0, sizeof(hw));
        auide_setup_ports(&hw, ahwif);
        hw.irq = ahwif->irq;
        hw.dev = dev;
        hw.chipset = ide_au1xxx;

        ide_init_port_hw(hwif, &hw);

        hwif->dev = dev;

        /* If the user has selected DDMA assisted copies,
           then set up a few local I/O function entry points
        */

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        hwif->INSW = auide_insw;
        hwif->OUTSW = auide_outsw;
#endif
        hwif->select_data = 0; /* no chipset-specific code */
        hwif->config_data = 0; /* no chipset-specific code */

        auide_hwif.hwif = hwif;
        hwif->hwif_data = &auide_hwif;

        idx[0] = hwif->index;

        ide_device_add(idx, &au1xxx_port_info);

        dev_set_drvdata(dev, hwif);

        printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );

 out:
        return ret;
}

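/*
 * Platform bus remove: unregister the interface, then unmap and release
 * the register window.
 */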
static int au_ide_remove(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;
        ide_hwif_t *hwif = dev_get_drvdata(dev);
        _auide_hwif *ahwif = &auide_hwif;

        ide_unregister(hwif);

        iounmap((void *)ahwif->regbase);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, res->end - res->start + 1);

        return 0;
}

static struct device_driver au1200_ide_driver = {
        .name           = "au1200-ide",
        .bus            = &platform_bus_type,
        .probe          = au_ide_probe,
        .remove         = au_ide_remove,
};

static int __init au_ide_init(void)
{
        return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
        driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);