/*
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 * IDE Interface and Linux Device Driver" Application Note.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>

#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME "au1200-ide"
#define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"

/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE 1

static _auide_hwif auide_hwif;

static int auide_ddma_init(_auide_hwif *auide);

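/*
 * PIO transfers offloaded to DbDMA: auide_insw()/auide_outsw() queue the
 * caller's buffer on the RX/TX channel and busy-wait until the current
 * descriptor's valid bit (DSCR_CMD0_V) clears.
 */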
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)

void auide_insw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
                            DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->rx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

void auide_outsw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_source_flags(ahwif->tx_chan, (void *)addr,
                              count << 1, DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->tx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

#endif

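/*
 * Transfer-mode hooks: both functions program the static bus controller
 * timing and configuration registers for RCS2# (MEM_STTIME2/MEM_STCFG2)
 * to match the requested PIO or MWDMA mode.
 */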
static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        /* set pio mode! */
        switch (pio) {
        case 0:
                mem_sttime = SBC_IDE_TIMING(PIO0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
                break;

        case 1:
                mem_sttime = SBC_IDE_TIMING(PIO1);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
                break;

        case 2:
                mem_sttime = SBC_IDE_TIMING(PIO2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
                break;

        case 3:
                mem_sttime = SBC_IDE_TIMING(PIO3);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
                break;

        case 4:
                mem_sttime = SBC_IDE_TIMING(PIO4);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
                break;
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);
}

static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        switch (speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        case XFER_MW_DMA_2:
                mem_sttime = SBC_IDE_TIMING(MDMA2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
                break;
        case XFER_MW_DMA_1:
                mem_sttime = SBC_IDE_TIMING(MDMA1);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
                break;
        case XFER_MW_DMA_0:
                mem_sttime = SBC_IDE_TIMING(MDMA0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
                break;
#endif
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);
}

/*
 * Multi-Word DMA + DbDMA functions
 */

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
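/*
 * Build the DbDMA descriptor chain for the current request: walk the
 * scatterlist, split each segment into chunks of at most 0xfe00 bytes,
 * and request a completion interrupt only for the final segment.
 */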
static int auide_build_dmatable(ide_drive_t *drive)
{
        int i, iswrite, count = 0;
        ide_hwif_t *hwif = HWIF(drive);
        struct request *rq = HWGROUP(drive)->rq;
        _auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
        struct scatterlist *sg;

        iswrite = (rq_data_dir(rq) == WRITE);
        /* Save for interrupt context */
        ahwif->drive = drive;

        hwif->sg_nents = i = ide_build_sglist(drive, rq);

        if (!i)
                return 0;

        /* fill the descriptors */
        sg = hwif->sg_table;
        while (i && sg_dma_len(sg)) {
                u32 cur_addr;
                u32 cur_len;

                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                while (cur_len) {
                        u32 flags = DDMA_FLAGS_NOIE;
                        unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

                        if (++count >= PRD_ENTRIES) {
                                printk(KERN_WARNING "%s: DMA table too small\n",
                                       drive->name);
                                goto use_pio_instead;
                        }

                        /* Lets enable intr for the last descriptor only */
                        if (1 == i)
                                flags = DDMA_FLAGS_IE;
                        else
                                flags = DDMA_FLAGS_NOIE;

                        if (iswrite) {
                                if (!put_source_flags(ahwif->tx_chan,
                                                      (void *)sg_virt(sg),
                                                      tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __func__, __LINE__);
                                }
                        } else {
                                if (!put_dest_flags(ahwif->rx_chan,
                                                    (void *)sg_virt(sg),
                                                    tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __func__, __LINE__);
                                }
                        }

                        cur_addr += tc;
                        cur_len -= tc;
                }
                sg = sg_next(sg);
                i--;
        }

        if (count)
                return 1;

use_pio_instead:
        ide_destroy_dmatable(drive);

        return 0; /* revert to PIO for this request */
}

static int auide_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        if (hwif->sg_nents) {
                ide_destroy_dmatable(drive);
                hwif->sg_nents = 0;
        }

        return 0;
}

static void auide_dma_start(ide_drive_t *drive)
{
}

static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr,
                            (2 * WAIT_CMD), NULL);
}

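/*
 * Prepare a DMA transfer: if the descriptor chain cannot be built,
 * re-map the scatterlist and return non-zero so the request is
 * reverted to PIO.
 */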
static int auide_dma_setup(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;

        if (!auide_build_dmatable(drive)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        drive->waiting_for_dma = 1;
        return 0;
}

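/*
 * Completion is signalled by the DbDMA callbacks below, which clear
 * drive->waiting_for_dma; this hook counts its polls and warns once
 * DMA_WAIT_TIMEOUT attempts have passed without completion.
 */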
static int auide_dma_test_irq(ide_drive_t *drive)
{
        if (drive->waiting_for_dma == 0)
                printk(KERN_WARNING "%s: ide_dma_test_irq \
                        called while not waiting\n", drive->name);

        /* If dbdma didn't execute the STOP command yet, the
         * active bit is still set
         */
        drive->waiting_for_dma++;
        if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
                printk(KERN_WARNING "%s: timeout waiting for ddma to \
                        complete\n", drive->name);
                return 1;
        }
        udelay(10);
        return 0;
}

static void auide_dma_host_set(ide_drive_t *drive, int on)
{
}

static void auide_dma_lost_irq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}

static void auide_ddma_tx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;
        ahwif->drive->waiting_for_dma = 0;
}

static void auide_ddma_rx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;
        ahwif->drive->waiting_for_dma = 0;
}

#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */

static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
                                 u32 devwidth, u32 flags)
{
        dev->dev_id = dev_id;
        dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
        dev->dev_intlevel = 0;
        dev->dev_intpolarity = 0;
        dev->dev_tsize = tsize;
        dev->dev_devwidth = devwidth;
        dev->dev_flags = flags;
}

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)

static void auide_dma_timeout(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

        if (hwif->ide_dma_test_irq(drive))
                return;

        hwif->ide_dma_end(drive);
}

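/*
 * DbDMA setup for the DMA (MDMA2) configuration: register source/target
 * device table entries for the ATA request line, allocate the TX and RX
 * channels with their completion callbacks and descriptor rings, and
 * allocate the hwif's PRD table.
 */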
static int auide_ddma_init(_auide_hwif *auide)
{
        dbdev_tab_t source_dev_tab, target_dev_tab;
        u32 dev_id, tsize, devwidth, flags;
        ide_hwif_t *hwif = auide->hwif;

        dev_id = AU1XXX_ATA_DDMA_REQ;

        tsize = 8; /* 1 */
        devwidth = 32; /* 16 */

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab, dev_id,
                             tsize, devwidth, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab, dev_id,
                             tsize, devwidth, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* We also need to add a target device for the DMA */
        auide_init_dbdma_dev(&target_dev_tab, (u32)DSCR_CMD0_ALWAYS,
                             tsize, devwidth, DEV_FLAGS_ANYUSE);
        auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
                                                 auide->tx_dev_id,
                                                 auide_ddma_tx_callback,
                                                 (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 auide->target_dev_id,
                                                 auide_ddma_rx_callback,
                                                 (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
                                                PRD_ENTRIES * PRD_BYTES, /* 1 Page */
                                                &hwif->dmatable_dma, GFP_KERNEL);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#else

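/*
 * DbDMA setup for the PIO+DbDMA (offload) configuration: both pseudo
 * devices use DSCR_CMD0_ALWAYS and no completion callbacks, since the
 * insw/outsw helpers above busy-wait on the descriptors themselves.
 */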
static int auide_ddma_init(_auide_hwif *auide)
{
        dbdev_tab_t source_dev_tab;
        int flags;

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS,
                             8, 32, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS,
                             8, 32, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
                                                 auide->tx_dev_id,
                                                 NULL, (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 DSCR_CMD0_ALWAYS,
                                                 NULL, (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#endif

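/*
 * Translate the memory-mapped register base into the hw_regs_t I/O port
 * layout expected by the IDE core; the alternate status register lives
 * at register offset 14.
 */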
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
        int i;
        unsigned long *ata_regs = hw->io_ports;

        /* FIXME? */
        for (i = 0; i < IDE_CONTROL_OFFSET; i++)
                *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);

        /* set the Alternative Status register */
        *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
}

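/*
 * Host driver description: the timing hooks above are wired in through
 * au1xxx_port_ops; there is no SFF-style bus-master DMA engine, so the
 * host flags disable it, and MWDMA2 is only advertised when the DbDMA
 * MDMA2 configuration is selected.
 */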
static const struct ide_port_ops au1xxx_port_ops = {
        .set_pio_mode = au1xxx_set_pio_mode,
        .set_dma_mode = auide_set_dma_mode,
};

static const struct ide_port_info au1xxx_port_info = {
        .port_ops = &au1xxx_port_ops,
        .host_flags = IDE_HFLAG_POST_SET_MODE |
                      IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
                      IDE_HFLAG_NO_IO_32BIT |
                      IDE_HFLAG_UNMASK_IRQS,
        .pio_mask = ATA_PIO4,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        .mwdma_mask = ATA_MWDMA2,
#endif
};

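/*
 * Probe: map the platform MEM/IRQ resources, describe the port to the
 * IDE core, hook up the DbDMA-specific methods for the configuration
 * that was built in, initialise the DbDMA channels and register the
 * interface.
 */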
static int au_ide_probe(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        _auide_hwif *ahwif = &auide_hwif;
        ide_hwif_t *hwif;
        struct resource *res;
        int ret = 0;
        u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
        hw_regs_t hw;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
        char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
        char *mode = "PIO+DDMA(offload)";
#endif

        memset(&auide_hwif, 0, sizeof(_auide_hwif));
        ahwif->irq = platform_get_irq(pdev, 0);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (res == NULL) {
                pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }
        if (ahwif->irq < 0) {
                pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }

        if (!request_mem_region(res->start, res->end - res->start + 1,
                                pdev->name)) {
                pr_debug("%s: request_mem_region failed\n", DRV_NAME);
                ret = -EBUSY;
                goto out;
        }

        ahwif->regbase = (u32)ioremap(res->start, res->end - res->start + 1);
        if (ahwif->regbase == 0) {
                ret = -ENOMEM;
                goto out;
        }

        hwif = ide_find_port();
        if (hwif == NULL) {
                ret = -ENOENT;
                goto out;
        }

        memset(&hw, 0, sizeof(hw));
        auide_setup_ports(&hw, ahwif);
        hw.irq = ahwif->irq;
        hw.dev = dev;
        hw.chipset = ide_au1xxx;

        ide_init_port_hw(hwif, &hw);

        hwif->dev = dev;

        /* If the user has selected DDMA assisted copies,
           then set up a few local I/O function entry points
        */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        hwif->INSW = auide_insw;
        hwif->OUTSW = auide_outsw;
#endif
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        hwif->dma_timeout = &auide_dma_timeout;
        hwif->dma_host_set = &auide_dma_host_set;
        hwif->dma_exec_cmd = &auide_dma_exec_cmd;
        hwif->dma_start = &auide_dma_start;
        hwif->ide_dma_end = &auide_dma_end;
        hwif->dma_setup = &auide_dma_setup;
        hwif->ide_dma_test_irq = &auide_dma_test_irq;
        hwif->dma_lost_irq = &auide_dma_lost_irq;
#endif
        hwif->select_data = 0;  /* no chipset-specific code */
        hwif->config_data = 0;  /* no chipset-specific code */

        auide_hwif.hwif = hwif;
        hwif->hwif_data = &auide_hwif;

        auide_ddma_init(&auide_hwif);

        idx[0] = hwif->index;

        ide_device_add(idx, &au1xxx_port_info);

        dev_set_drvdata(dev, hwif);

        printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);

out:
        return ret;
}

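/*
 * Teardown mirrors probe: unregister the interface, unmap the register
 * window and release the memory region.
 */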
static int au_ide_remove(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;
        ide_hwif_t *hwif = dev_get_drvdata(dev);
        _auide_hwif *ahwif = &auide_hwif;

        ide_unregister(hwif->index);

        iounmap((void *)ahwif->regbase);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, res->end - res->start + 1);

        return 0;
}

static struct device_driver au1200_ide_driver = {
        .name = "au1200-ide",
        .bus = &platform_bus_type,
        .probe = au_ide_probe,
        .remove = au_ide_remove,
};

static int __init au_ide_init(void)
{
        return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
        driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);