/*
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 * IDE Interface and Linux Device Driver" Application Note.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>

#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME "au1200-ide"
#define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"

/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE 1

static _auide_hwif auide_hwif;

static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d);

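/*
 * PIO-over-DbDMA helpers: when CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA is set,
 * these replace the generic insw/outsw so that PIO data transfers are
 * offloaded to the descriptor-based DMA controller.
 */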
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)

void auide_insw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
                            DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->rx_chan);
        dp = ctp->cur_ptr;
        /* busy-wait until the DbDMA controller clears the descriptor's valid bit */
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

void auide_outsw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_source_flags(ahwif->tx_chan, (void *)addr,
                              count << 1, DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->tx_chan);
        dp = ctp->cur_ptr;
        /* busy-wait until the DbDMA controller clears the descriptor's valid bit */
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

#endif

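/*
 * Program the static bus controller (MEM_STTIME2/MEM_STCFG2) for the
 * selected PIO mode on RCS2#.
 */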
static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        /* set pio mode! */
        switch (pio) {
        case 0:
                mem_sttime = SBC_IDE_TIMING(PIO0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
                break;

        case 1:
                mem_sttime = SBC_IDE_TIMING(PIO1);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
                break;

        case 2:
                mem_sttime = SBC_IDE_TIMING(PIO2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
                break;

        case 3:
                mem_sttime = SBC_IDE_TIMING(PIO3);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
                break;

        case 4:
                mem_sttime = SBC_IDE_TIMING(PIO4);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
                break;
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);
}

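/*
 * Program MEM_STTIME2/MEM_STCFG2 for the selected Multi-Word DMA mode
 * on RCS2#.
 */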
static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        switch (speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        case XFER_MW_DMA_2:
                mem_sttime = SBC_IDE_TIMING(MDMA2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;

                break;
        case XFER_MW_DMA_1:
                mem_sttime = SBC_IDE_TIMING(MDMA1);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;

                break;
        case XFER_MW_DMA_0:
                mem_sttime = SBC_IDE_TIMING(MDMA0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;

                break;
#endif
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);
}

/*
 * Multi-Word DMA + DbDMA functions
 */

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
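/*
 * Walk the request's scatterlist and queue one DbDMA descriptor per chunk,
 * enabling the completion interrupt only on the last one.  Returns 1 on
 * success, 0 if the request should fall back to PIO.
 */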
static int auide_build_dmatable(ide_drive_t *drive)
{
        int i, iswrite, count = 0;
        ide_hwif_t *hwif = HWIF(drive);

        struct request *rq = HWGROUP(drive)->rq;

        _auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
        struct scatterlist *sg;

        iswrite = (rq_data_dir(rq) == WRITE);
        /* Save for interrupt context */
        ahwif->drive = drive;

        hwif->sg_nents = i = ide_build_sglist(drive, rq);

        if (!i)
                return 0;

        /* fill the descriptors */
        sg = hwif->sg_table;
        while (i && sg_dma_len(sg)) {
                u32 cur_addr;
                u32 cur_len;

                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                while (cur_len) {
                        u32 flags = DDMA_FLAGS_NOIE;
                        unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

                        if (++count >= PRD_ENTRIES) {
                                printk(KERN_WARNING "%s: DMA table too small\n",
                                       drive->name);
                                goto use_pio_instead;
                        }

                        /* Let's enable the interrupt for the last descriptor only */
                        if (i == 1)
                                flags = DDMA_FLAGS_IE;
                        else
                                flags = DDMA_FLAGS_NOIE;

                        if (iswrite) {
                                if (!put_source_flags(ahwif->tx_chan,
                                                      (void *)sg_virt(sg),
                                                      tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __func__, __LINE__);
                                }
                        } else {
                                if (!put_dest_flags(ahwif->rx_chan,
                                                    (void *)sg_virt(sg),
                                                    tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __func__, __LINE__);
                                }
                        }

                        cur_addr += tc;
                        cur_len -= tc;
                }
                sg = sg_next(sg);
                i--;
        }

        if (count)
                return 1;

use_pio_instead:
        ide_destroy_dmatable(drive);

        return 0; /* revert to PIO for this request */
}

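/* End of DMA for this request: tear down the scatterlist mapping. */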
static int auide_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        if (hwif->sg_nents) {
                ide_destroy_dmatable(drive);
                hwif->sg_nents = 0;
        }

        return 0;
}

static void auide_dma_start(ide_drive_t *drive)
{
        /*
         * Nothing to do here: the DbDMA channels are started at init time
         * and begin processing descriptors as soon as they are queued.
         */
}

static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr,
                            (2 * WAIT_CMD), NULL);
}

static int auide_dma_setup(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;

        if (!auide_build_dmatable(drive)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        drive->waiting_for_dma = 1;
        return 0;
}

static int auide_dma_test_irq(ide_drive_t *drive)
{
        if (drive->waiting_for_dma == 0)
                printk(KERN_WARNING "%s: ide_dma_test_irq called while not waiting\n",
                       drive->name);

        /* If dbdma didn't execute the STOP command yet, the
         * active bit is still set
         */
        drive->waiting_for_dma++;
        if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
                printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
                       drive->name);
                return 1;
        }
        udelay(10);
        return 0;
}

static void auide_dma_host_set(ide_drive_t *drive, int on)
{
}

static void auide_dma_lost_irq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}

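/*
 * DbDMA completion callbacks: clearing waiting_for_dma lets
 * auide_dma_test_irq() report that the transfer has finished.
 */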
static void auide_ddma_tx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;
        ahwif->drive->waiting_for_dma = 0;
}

static void auide_ddma_rx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;
        ahwif->drive->waiting_for_dma = 0;
}

#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */

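/* Fill in a dbdev_tab_t entry for registration with au1xxx_ddma_add_device(). */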
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
                                 u32 devwidth, u32 flags)
{
        dev->dev_id = dev_id;
        dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
        dev->dev_intlevel = 0;
        dev->dev_intpolarity = 0;
        dev->dev_tsize = tsize;
        dev->dev_devwidth = devwidth;
        dev->dev_flags = flags;
}

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
static void auide_dma_timeout(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

        if (auide_dma_test_irq(drive))
                return;

        auide_dma_end(drive);
}

static struct ide_dma_ops au1xxx_dma_ops = {
        .dma_host_set   = auide_dma_host_set,
        .dma_setup      = auide_dma_setup,
        .dma_exec_cmd   = auide_dma_exec_cmd,
        .dma_start      = auide_dma_start,
        .dma_end        = auide_dma_end,
        .dma_test_irq   = auide_dma_test_irq,
        .dma_lost_irq   = auide_dma_lost_irq,
        .dma_timeout    = auide_dma_timeout,
};

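/*
 * MDMA2 configuration: register the ATA request and "always" DbDMA devices,
 * allocate the TX/RX channels with their completion callbacks, and set up
 * the descriptor rings plus the PRD table used by the IDE core.
 */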
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
        dbdev_tab_t source_dev_tab, target_dev_tab;
        u32 dev_id, tsize, devwidth, flags;

        dev_id = AU1XXX_ATA_DDMA_REQ;

        tsize = 8; /* 1 */
        devwidth = 32; /* 16 */

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab, dev_id,
                             tsize, devwidth, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab, dev_id,
                             tsize, devwidth, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* We also need to add a target device for the DMA */
        auide_init_dbdma_dev(&target_dev_tab, (u32)DSCR_CMD0_ALWAYS,
                             tsize, devwidth, DEV_FLAGS_ANYUSE);
        auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
                                                 auide->tx_dev_id,
                                                 auide_ddma_tx_callback,
                                                 (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 auide->target_dev_id,
                                                 auide_ddma_rx_callback,
                                                 (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
                                                PRD_ENTRIES * PRD_BYTES, /* 1 Page */
                                                &hwif->dmatable_dma, GFP_KERNEL);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#else
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
        dbdev_tab_t source_dev_tab;
        int flags;

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS,
                             8, 32, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS,
                             8, 32, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
                                                 auide->tx_dev_id,
                                                 NULL, (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 DSCR_CMD0_ALWAYS,
                                                 NULL, (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#endif

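/* Translate the memory-mapped register base into the hw_regs_t io_ports layout. */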
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
        int i;
        unsigned long *ata_regs = hw->io_ports;

        /* FIXME? */
        for (i = 0; i < IDE_CONTROL_OFFSET; i++)
                *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);

        /* set the Alternative Status register */
        *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
}

static const struct ide_port_ops au1xxx_port_ops = {
        .set_pio_mode   = au1xxx_set_pio_mode,
        .set_dma_mode   = auide_set_dma_mode,
};

static const struct ide_port_info au1xxx_port_info = {
        .init_dma       = auide_ddma_init,
        .port_ops       = &au1xxx_port_ops,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        .dma_ops        = &au1xxx_dma_ops,
#endif
        .host_flags     = IDE_HFLAG_POST_SET_MODE |
                          IDE_HFLAG_NO_IO_32BIT |
                          IDE_HFLAG_UNMASK_IRQS,
        .pio_mask       = ATA_PIO4,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        .mwdma_mask     = ATA_MWDMA2,
#endif
};

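/*
 * Platform bus probe: map the controller's register window, describe the
 * port to the IDE core and register it.
 */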
static int au_ide_probe(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        _auide_hwif *ahwif = &auide_hwif;
        ide_hwif_t *hwif;
        struct resource *res;
        int ret = 0;
        u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
        hw_regs_t hw;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
        char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
        char *mode = "PIO+DDMA(offload)";
#endif

        memset(&auide_hwif, 0, sizeof(_auide_hwif));
        ahwif->irq = platform_get_irq(pdev, 0);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (res == NULL) {
                pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }
        if (ahwif->irq < 0) {
                pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }

        if (!request_mem_region(res->start, res->end - res->start + 1,
                                pdev->name)) {
                pr_debug("%s: request_mem_region failed\n", DRV_NAME);
                ret = -EBUSY;
                goto out;
        }

        ahwif->regbase = (u32)ioremap(res->start, res->end - res->start + 1);
        if (ahwif->regbase == 0) {
                ret = -ENOMEM;
                goto out;
        }

        hwif = ide_find_port();
        if (hwif == NULL) {
                ret = -ENOENT;
                goto out;
        }

        memset(&hw, 0, sizeof(hw));
        auide_setup_ports(&hw, ahwif);
        hw.irq = ahwif->irq;
        hw.dev = dev;
        hw.chipset = ide_au1xxx;

        ide_init_port_hw(hwif, &hw);

        hwif->dev = dev;

        /*
         * If the user has selected DDMA assisted copies, then set up a few
         * local I/O function entry points.
         */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        hwif->INSW = auide_insw;
        hwif->OUTSW = auide_outsw;
#endif
        hwif->select_data = 0;  /* no chipset-specific code */
        hwif->config_data = 0;  /* no chipset-specific code */

        auide_hwif.hwif = hwif;
        hwif->hwif_data = &auide_hwif;

        idx[0] = hwif->index;

        ide_device_add(idx, &au1xxx_port_info);

        dev_set_drvdata(dev, hwif);

        printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);

out:
        return ret;
}

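/* Platform bus remove: detach the interface and release its resources. */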
static int au_ide_remove(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;
        ide_hwif_t *hwif = dev_get_drvdata(dev);
        _auide_hwif *ahwif = &auide_hwif;

        ide_unregister(hwif->index);

        iounmap((void *)ahwif->regbase);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, res->end - res->start + 1);

        return 0;
}

static struct device_driver au1200_ide_driver = {
        .name           = "au1200-ide",
        .bus            = &platform_bus_type,
        .probe          = au_ide_probe,
        .remove         = au_ide_remove,
};

static int __init au_ide_init(void)
{
        return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
        driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);