/*
 * linux/drivers/ide/mips/au1xxx-ide.c  version 01.30.00  Aug. 02 2005
 *
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 * IDE Interface and Linux Device Driver" application note.
 */
#undef REALLY_SLOW_IO           /* most systems can safely undef this */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>

#include <linux/init.h>
#include <linux/ide.h>
#include <linux/sysdev.h>

#include <linux/dma-mapping.h>

#include "ide-timing.h"

#include <asm/io.h>
#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>

#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME        "au1200-ide"
#define DRV_VERSION     "1.0"
#define DRV_AUTHOR      "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"

/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE    1

static _auide_hwif auide_hwif;
static int dbdma_init_done;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)

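/*
 * PIO-over-DBDMA helpers: these replace the generic insw()/outsw() data
 * transfer routines so that even PIO transfers go through the
 * descriptor-based DMA controller.  A single descriptor is queued and the
 * CPU busy-waits until the engine has consumed it.
 */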
void auide_insw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
                            DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->rx_chan);
        dp = ctp->cur_ptr;
        /* busy-wait until the DBDMA engine has cleared the valid bit */
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

void auide_outsw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_source_flags(ahwif->tx_chan, (void *)addr,
                              count << 1, DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->tx_chan);
        dp = ctp->cur_ptr;
        /* busy-wait until the DBDMA engine has cleared the valid bit */
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

#endif

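/*
 * Program the static bus controller (MEM_STTIME2/MEM_STCFG2) for the
 * requested PIO mode and report the selected transfer rate to the drive.
 */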
static void auide_tune_drive(ide_drive_t *drive, byte pio)
{
        int mem_sttime;
        int mem_stcfg;
        u8 speed;

        /* get the best pio mode for the drive */
        pio = ide_get_best_pio_mode(drive, pio, 4, NULL);

        printk(KERN_INFO "%s: setting Au1XXX IDE to PIO mode%d\n",
               drive->name, pio);

        mem_sttime = 0;
        mem_stcfg = au_readl(MEM_STCFG2);

        /* set pio mode! */
        switch (pio) {
        case 0:
                mem_sttime = SBC_IDE_TIMING(PIO0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
                break;

        case 1:
                mem_sttime = SBC_IDE_TIMING(PIO1);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
                break;

        case 2:
                mem_sttime = SBC_IDE_TIMING(PIO2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
                break;

        case 3:
                mem_sttime = SBC_IDE_TIMING(PIO3);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
                break;

        case 4:
                mem_sttime = SBC_IDE_TIMING(PIO4);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
                break;
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);

        speed = pio + XFER_PIO_0;
        ide_config_drive_speed(drive, speed);
}

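/*
 * Set a PIO or Multi-Word DMA transfer mode.  PIO modes are handed off to
 * auide_tune_drive(); MDMA modes update the drive via
 * ide_config_drive_speed() and then reprogram the static bus timing
 * registers.
 */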
static int auide_tune_chipset(ide_drive_t *drive, u8 speed)
{
        int mem_sttime;
        int mem_stcfg;
        unsigned long mode;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        if (ide_use_dma(drive))
                mode = ide_dma_speed(drive, 0);
#endif

        mem_sttime = 0;
        mem_stcfg = au_readl(MEM_STCFG2);

        if (speed >= XFER_PIO_0 && speed <= XFER_PIO_4) {
                auide_tune_drive(drive, speed - XFER_PIO_0);
                return 0;
        }

        switch (speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        case XFER_MW_DMA_2:
                mem_sttime = SBC_IDE_TIMING(MDMA2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;

                mode = XFER_MW_DMA_2;
                break;
        case XFER_MW_DMA_1:
                mem_sttime = SBC_IDE_TIMING(MDMA1);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;

                mode = XFER_MW_DMA_1;
                break;
        case XFER_MW_DMA_0:
                mem_sttime = SBC_IDE_TIMING(MDMA0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;

                mode = XFER_MW_DMA_0;
                break;
#endif
        default:
                return 1;
        }

        if (ide_config_drive_speed(drive, mode))
                return 1;

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);

        return 0;
}

/*
 * Multi-Word DMA + DbDMA functions
 */

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA

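/*
 * Map the request's scatter/gather list for streaming DMA in the direction
 * implied by the request.
 */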
static int auide_build_sglist(ide_drive_t *drive, struct request *rq)
{
        ide_hwif_t *hwif = drive->hwif;
        _auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
        struct scatterlist *sg = hwif->sg_table;

        ide_map_sg(drive, rq);

        if (rq_data_dir(rq) == READ)
                hwif->sg_dma_direction = DMA_FROM_DEVICE;
        else
                hwif->sg_dma_direction = DMA_TO_DEVICE;

        return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,
                          hwif->sg_dma_direction);
}

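/*
 * Translate the mapped scatterlist into DBDMA descriptors.  Each segment is
 * split into chunks of at most 0xfe00 bytes, and interrupt-on-completion
 * (DDMA_FLAGS_IE) is requested only for the last segment.  Returns 1 on
 * success or 0 to make the caller fall back to PIO.
 */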
static int auide_build_dmatable(ide_drive_t *drive)
{
        int i, iswrite, count = 0;
        ide_hwif_t *hwif = HWIF(drive);

        struct request *rq = HWGROUP(drive)->rq;

        _auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
        struct scatterlist *sg;

        iswrite = (rq_data_dir(rq) == WRITE);
        /* Save for interrupt context */
        ahwif->drive = drive;

        /* Build sglist */
        hwif->sg_nents = i = auide_build_sglist(drive, rq);

        if (!i)
                return 0;

        /* fill the descriptors */
        sg = hwif->sg_table;
        while (i && sg_dma_len(sg)) {
                u32 cur_addr;
                u32 cur_len;

                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                while (cur_len) {
                        u32 flags = DDMA_FLAGS_NOIE;
                        unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

                        if (++count >= PRD_ENTRIES) {
                                printk(KERN_WARNING "%s: DMA table too small\n",
                                       drive->name);
                                goto use_pio_instead;
                        }

                        /* enable the interrupt for the last descriptor only */
                        if (i == 1)
                                flags = DDMA_FLAGS_IE;
                        else
                                flags = DDMA_FLAGS_NOIE;

                        if (iswrite) {
                                if (!put_source_flags(ahwif->tx_chan,
                                                      (void *)(page_address(sg->page)
                                                               + sg->offset),
                                                      tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __FUNCTION__, __LINE__);
                                }
                        } else {
                                if (!put_dest_flags(ahwif->rx_chan,
                                                    (void *)(page_address(sg->page)
                                                             + sg->offset),
                                                    tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __FUNCTION__, __LINE__);
                                }
                        }

                        cur_addr += tc;
                        cur_len -= tc;
                }
                sg++;
                i--;
        }

        if (count)
                return 1;

use_pio_instead:
        dma_unmap_sg(ahwif->dev,
                     hwif->sg_table,
                     hwif->sg_nents,
                     hwif->sg_dma_direction);

        return 0; /* revert to PIO for this request */
}

static int auide_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        _auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;

        if (hwif->sg_nents) {
                dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents,
                             hwif->sg_dma_direction);
                hwif->sg_nents = 0;
        }

        return 0;
}

static void auide_dma_start(ide_drive_t *drive)
{
}


static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr,
                            (2 * WAIT_CMD), NULL);
}

static int auide_dma_setup(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;

        if (!auide_build_dmatable(drive)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        drive->waiting_for_dma = 1;
        return 0;
}

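/*
 * Decide whether DMA may be used for this drive.  The DBDMA engine is set
 * up lazily on the first call; blacklisted drives are forced back to PIO,
 * and the result is reported to the core via ide_dma_on() or
 * ide_dma_off_quietly().
 */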
static int auide_dma_check(ide_drive_t *drive)
{
        u8 speed;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA

        if (dbdma_init_done == 0) {
                auide_hwif.white_list = ide_in_drive_list(drive->id,
                                                          dma_white_list);
                auide_hwif.black_list = ide_in_drive_list(drive->id,
                                                          dma_black_list);
                auide_hwif.drive = drive;
                auide_ddma_init(&auide_hwif);
                dbdma_init_done = 1;
        }
#endif

        /* Is the drive in our DMA black list? */

        if (auide_hwif.black_list) {
                drive->using_dma = 0;

                /* Borrowed the warning message from ide-dma.c */

                printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
                       drive->name, drive->id->model);
        } else
                drive->using_dma = 1;

        speed = ide_find_best_mode(drive, XFER_PIO | XFER_MWDMA);

        if (drive->autodma && (speed & XFER_MODE) != XFER_PIO)
                return HWIF(drive)->ide_dma_on(drive);

        return HWIF(drive)->ide_dma_off_quietly(drive);
}

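/*
 * The DBDMA completion callbacks clear waiting_for_dma, so it doubles here
 * as a poll counter: once DMA_WAIT_TIMEOUT polls have elapsed, a warning is
 * printed and the transfer is reported as finished.
 */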
static int auide_dma_test_irq(ide_drive_t *drive)
{
        if (drive->waiting_for_dma == 0)
                printk(KERN_WARNING "%s: ide_dma_test_irq called while not waiting\n",
                       drive->name);

        /* If dbdma didn't execute the STOP command yet, the
         * active bit is still set
         */
        drive->waiting_for_dma++;
        if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
                printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
                       drive->name);
                return 1;
        }
        udelay(10);
        return 0;
}

static int auide_dma_host_on(ide_drive_t *drive)
{
        return 0;
}

static int auide_dma_on(ide_drive_t *drive)
{
        drive->using_dma = 1;
        return auide_dma_host_on(drive);
}


static int auide_dma_host_off(ide_drive_t *drive)
{
        return 0;
}

static int auide_dma_off_quietly(ide_drive_t *drive)
{
        drive->using_dma = 0;
        return auide_dma_host_off(drive);
}

static int auide_dma_lostirq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);
        return 0;
}

static void auide_ddma_tx_callback(int irq, void *param, struct pt_regs *regs)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;
        ahwif->drive->waiting_for_dma = 0;
}

static void auide_ddma_rx_callback(int irq, void *param, struct pt_regs *regs)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;
        ahwif->drive->waiting_for_dma = 0;
}

#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */

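/* Fill in a DBDMA device table entry for the ATA controller. */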
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
                                 u32 devwidth, u32 flags)
{
        dev->dev_id = dev_id;
        dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
        dev->dev_intlevel = 0;
        dev->dev_intpolarity = 0;
        dev->dev_tsize = tsize;
        dev->dev_devwidth = devwidth;
        dev->dev_flags = flags;
}

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)

static int auide_dma_timeout(ide_drive_t *drive)
{
        // printk("%s\n", __FUNCTION__);

        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

        if (HWIF(drive)->ide_dma_test_irq(drive))
                return 0;

        return HWIF(drive)->ide_dma_end(drive);
}

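/*
 * Set up the DBDMA device table entries, allocate the TX/RX channels and
 * their descriptor rings, and allocate the PRD table.  Burst size and
 * device width are reduced for drives that appear on neither the white
 * nor the black list.
 */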
static int auide_ddma_init(_auide_hwif *auide)
{
        dbdev_tab_t source_dev_tab, target_dev_tab;
        u32 dev_id, tsize, devwidth, flags;
        ide_hwif_t *hwif = auide->hwif;

        dev_id = AU1XXX_ATA_DDMA_REQ;

        if (auide->white_list || auide->black_list) {
                tsize = 8;
                devwidth = 32;
        } else {
                tsize = 1;
                devwidth = 16;

                printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",
                       auide_hwif.drive->id->model);
                printk(KERN_ERR "            please read 'Documentation/mips/AU1xxx_IDE.README'");
        }

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab,
                             dev_id,
                             tsize, devwidth, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab,
                             dev_id,
                             tsize, devwidth, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* We also need to add a target device for the DMA */
        auide_init_dbdma_dev(&target_dev_tab,
                             (u32)DSCR_CMD0_ALWAYS,
                             tsize, devwidth, DEV_FLAGS_ANYUSE);
        auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
                                                 auide->tx_dev_id,
                                                 auide_ddma_tx_callback,
                                                 (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 auide->target_dev_id,
                                                 auide_ddma_rx_callback,
                                                 (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        hwif->dmatable_cpu = dma_alloc_coherent(auide->dev,
                                                PRD_ENTRIES * PRD_BYTES, /* 1 Page */
                                                &hwif->dmatable_dma, GFP_KERNEL);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#else

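/*
 * PIO-only DBDMA setup: both channels are tied to the "always" request
 * line and no completion callbacks are registered, since the insw/outsw
 * replacements above poll for completion themselves.
 */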
static int auide_ddma_init(_auide_hwif *auide)
{
        dbdev_tab_t source_dev_tab;
        int flags;

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab,
                             (u32)DSCR_CMD0_ALWAYS,
                             8, 32, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab,
                             (u32)DSCR_CMD0_ALWAYS,
                             8, 32, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
                                                 auide->tx_dev_id,
                                                 NULL,
                                                 (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 DSCR_CMD0_ALWAYS,
                                                 NULL,
                                                 (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#endif

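/*
 * The task file registers live on the static bus starting at regbase and
 * are spaced (1 << AU1XXX_ATA_REG_OFFSET) bytes apart; the alternate
 * status/device control register is mapped at index 14.
 */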
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
        int i;
        unsigned long *ata_regs = hw->io_ports;

        /* FIXME? */
        for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
                *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
        }

        /* set the Alternative Status register */
        *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
}

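/*
 * Platform bus probe.  The board code is expected to register an
 * "au1200-ide" platform device that carries one IORESOURCE_MEM resource
 * for the controller registers plus an IRQ.  Roughly (illustrative sketch,
 * not taken from this driver):
 *
 *      static struct resource ide_resources[] = {
 *              { .start = ..., .end = ..., .flags = IORESOURCE_MEM },
 *              { .start = ..., .end = ..., .flags = IORESOURCE_IRQ },
 *      };
 *      static struct platform_device ide_device = {
 *              .name          = "au1200-ide",
 *              .id            = 0,
 *              .resource      = ide_resources,
 *              .num_resources = ARRAY_SIZE(ide_resources),
 *      };
 *      platform_device_register(&ide_device);
 */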
static int au_ide_probe(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        _auide_hwif *ahwif = &auide_hwif;
        ide_hwif_t *hwif;
        hw_regs_t *hw;
        struct resource *res;
        int ret = 0;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
        char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
        char *mode = "PIO+DDMA(offload)";
#endif

        memset(&auide_hwif, 0, sizeof(_auide_hwif));
        auide_hwif.dev = 0;

        ahwif->dev = dev;
        ahwif->irq = platform_get_irq(pdev, 0);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (res == NULL) {
                pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }
        if (ahwif->irq < 0) {
                pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }

        if (!request_mem_region(res->start, res->end - res->start, pdev->name)) {
                pr_debug("%s: request_mem_region failed\n", DRV_NAME);
                ret = -EBUSY;
                goto out;
        }

        ahwif->regbase = (u32)ioremap(res->start, res->end - res->start);
        if (ahwif->regbase == 0) {
                ret = -ENOMEM;
                goto out;
        }

        /* FIXME: This might possibly break PCMCIA IDE devices */

        hwif = &ide_hwifs[pdev->id];
        hw = &hwif->hw;
        hwif->irq = hw->irq = ahwif->irq;
        hwif->chipset = ide_au1xxx;

        auide_setup_ports(hw, ahwif);
        memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));

        hwif->ultra_mask = 0x0;              /* Disable Ultra DMA */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        hwif->mwdma_mask = 0x07;             /* Multimode-2 DMA */
        hwif->swdma_mask = 0x00;
#else
        hwif->mwdma_mask = 0x0;
        hwif->swdma_mask = 0x0;
#endif

        hwif->noprobe = 0;
        hwif->drives[0].unmask = 1;
        hwif->drives[1].unmask = 1;

        /* hold should be on in all cases */
        hwif->hold = 1;
        hwif->mmio = 2;

        /* If the user has selected DDMA assisted copies,
         * then set up a few local I/O function entry points
         */

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        hwif->INSW = auide_insw;
        hwif->OUTSW = auide_outsw;
#endif

        hwif->tuneproc = &auide_tune_drive;
        hwif->speedproc = &auide_tune_chipset;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        hwif->ide_dma_off_quietly = &auide_dma_off_quietly;
        hwif->ide_dma_timeout = &auide_dma_timeout;

        hwif->ide_dma_check = &auide_dma_check;
        hwif->dma_exec_cmd = &auide_dma_exec_cmd;
        hwif->dma_start = &auide_dma_start;
        hwif->ide_dma_end = &auide_dma_end;
        hwif->dma_setup = &auide_dma_setup;
        hwif->ide_dma_test_irq = &auide_dma_test_irq;
        hwif->ide_dma_host_off = &auide_dma_host_off;
        hwif->ide_dma_host_on = &auide_dma_host_on;
        hwif->ide_dma_lostirq = &auide_dma_lostirq;
        hwif->ide_dma_on = &auide_dma_on;

        hwif->autodma = 1;
        hwif->drives[0].autodma = hwif->autodma;
        hwif->drives[1].autodma = hwif->autodma;
        hwif->atapi_dma = 1;

#else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
        hwif->autodma = 0;
        hwif->channel = 0;
        hwif->hold = 1;
        hwif->select_data = 0;               /* no chipset-specific code */
        hwif->config_data = 0;               /* no chipset-specific code */

        hwif->drives[0].autodma = 0;
        hwif->drives[0].autotune = 1;        /* 1=autotune, 2=noautotune, 0=default */
#endif
        hwif->drives[0].no_io_32bit = 1;

        auide_hwif.hwif = hwif;
        hwif->hwif_data = &auide_hwif;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        auide_ddma_init(&auide_hwif);
        dbdma_init_done = 1;
#endif

        probe_hwif_init(hwif);
        dev_set_drvdata(dev, hwif);

        printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);

out:
        return ret;
}

static int au_ide_remove(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;
        ide_hwif_t *hwif = dev_get_drvdata(dev);
        _auide_hwif *ahwif = &auide_hwif;

        ide_unregister(hwif - ide_hwifs);

        iounmap((void *)ahwif->regbase);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, res->end - res->start);

        return 0;
}

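/*
 * The driver registers a plain struct device_driver directly on the
 * platform bus, so au_ide_probe()/au_ide_remove() receive a struct device
 * rather than a struct platform_device.
 */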
static struct device_driver au1200_ide_driver = {
        .name           = "au1200-ide",
        .bus            = &platform_bus_type,
        .probe          = au_ide_probe,
        .remove         = au_ide_remove,
};

static int __init au_ide_init(void)
{
        return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
        driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);