/*
 * linux/drivers/ide/mips/au1xxx-ide.c version 01.30.00 Aug. 02 2005
 *
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 * IDE Interface and Linux Device Driver" Application Note.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>

#include <linux/init.h>
#include <linux/ide.h>
#include <linux/sysdev.h>

#include <linux/dma-mapping.h>

#include "ide-timing.h"

#include <asm/io.h>
#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>

#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME	"au1200-ide"
#define DRV_VERSION	"1.0"
#define DRV_AUTHOR	"Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"

/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE	1

static _auide_hwif auide_hwif;
static int dbdma_init_done;

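/*
 * PIO transfer helpers used when CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA is set:
 * instead of programmed I/O, auide_insw()/auide_outsw() queue one DBDMA
 * descriptor for the whole buffer and busy-wait until the engine clears the
 * descriptor's valid bit.
 */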
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)

void auide_insw(unsigned long port, void *addr, u32 count)
{
	_auide_hwif *ahwif = &auide_hwif;
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
			    DDMA_FLAGS_NOIE)) {
		printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
		return;
	}
	ctp = *((chan_tab_t **)ahwif->rx_chan);
	dp = ctp->cur_ptr;
	while (dp->dscr_cmd0 & DSCR_CMD0_V)
		;
	ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

void auide_outsw(unsigned long port, void *addr, u32 count)
{
	_auide_hwif *ahwif = &auide_hwif;
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	if (!put_source_flags(ahwif->tx_chan, (void *)addr,
			      count << 1, DDMA_FLAGS_NOIE)) {
		printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
		return;
	}
	ctp = *((chan_tab_t **)ahwif->tx_chan);
	dp = ctp->cur_ptr;
	while (dp->dscr_cmd0 & DSCR_CMD0_V)
		;
	ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

#endif

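/*
 * Program the static bus timing (MEM_STTIME2) and configuration (MEM_STCFG2)
 * registers on RCS2# for the requested PIO mode, then record the transfer
 * rate in the drive via ide_config_drive_speed().
 */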
static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
	int mem_sttime;
	int mem_stcfg;
	u8 speed;

	mem_sttime = 0;
	mem_stcfg = au_readl(MEM_STCFG2);

	/* set pio mode! */
	switch (pio) {
	case 0:
		mem_sttime = SBC_IDE_TIMING(PIO0);

		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
		break;

	case 1:
		mem_sttime = SBC_IDE_TIMING(PIO1);

		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
		break;

	case 2:
		mem_sttime = SBC_IDE_TIMING(PIO2);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
		break;

	case 3:
		mem_sttime = SBC_IDE_TIMING(PIO3);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
		break;

	case 4:
		mem_sttime = SBC_IDE_TIMING(PIO4);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
		break;
	}

	au_writel(mem_sttime, MEM_STTIME2);
	au_writel(mem_stcfg, MEM_STCFG2);

	speed = pio + XFER_PIO_0;
	ide_config_drive_speed(drive, speed);
}

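/*
 * Set a PIO or Multi-Word DMA transfer mode. PIO modes are delegated to
 * au1xxx_set_pio_mode(); MWDMA modes reprogram the static bus timing after
 * the rate has been accepted by the drive.
 */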
static int auide_tune_chipset(ide_drive_t *drive, const u8 speed)
{
	int mem_sttime;
	int mem_stcfg;

	mem_sttime = 0;
	mem_stcfg = au_readl(MEM_STCFG2);

	if (speed >= XFER_PIO_0 && speed <= XFER_PIO_4) {
		au1xxx_set_pio_mode(drive, speed - XFER_PIO_0);
		return 0;
	}

	switch (speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	case XFER_MW_DMA_2:
		mem_sttime = SBC_IDE_TIMING(MDMA2);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;

		break;
	case XFER_MW_DMA_1:
		mem_sttime = SBC_IDE_TIMING(MDMA1);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;

		break;
	case XFER_MW_DMA_0:
		mem_sttime = SBC_IDE_TIMING(MDMA0);

		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;

		break;
#endif
	default:
		return 1;
	}

	if (ide_config_drive_speed(drive, speed))
		return 1;

	au_writel(mem_sttime, MEM_STTIME2);
	au_writel(mem_stcfg, MEM_STCFG2);

	return 0;
}

/*
 * Multi-Word DMA + DbDMA functions
 */

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA

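/*
 * Map the request's scatter/gather list for streaming DMA in the direction
 * implied by the request, and remember that direction for the later
 * dma_unmap_sg() in auide_dma_end().
 */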
static int auide_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	_auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
	struct scatterlist *sg = hwif->sg_table;

	ide_map_sg(drive, rq);

	if (rq_data_dir(rq) == READ)
		hwif->sg_dma_direction = DMA_FROM_DEVICE;
	else
		hwif->sg_dma_direction = DMA_TO_DEVICE;

	return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,
			  hwif->sg_dma_direction);
}

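/*
 * Walk the mapped scatter/gather list and queue one DBDMA descriptor per
 * chunk of at most 0xfe00 bytes, enabling the completion interrupt only for
 * descriptors of the final segment. Returns 0 (fall back to PIO) if the list
 * does not fit into PRD_ENTRIES descriptors.
 */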
static int auide_build_dmatable(ide_drive_t *drive)
{
	int i, iswrite, count = 0;
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq = HWGROUP(drive)->rq;
	_auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
	struct scatterlist *sg;

	iswrite = (rq_data_dir(rq) == WRITE);
	/* Save for interrupt context */
	ahwif->drive = drive;

	/* Build sglist */
	hwif->sg_nents = i = auide_build_sglist(drive, rq);

	if (!i)
		return 0;

	/* fill the descriptors */
	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			u32 flags = DDMA_FLAGS_NOIE;
			unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

			if (++count >= PRD_ENTRIES) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			}

			/* Lets enable intr for the last descriptor only */
			if (1 == i)
				flags = DDMA_FLAGS_IE;
			else
				flags = DDMA_FLAGS_NOIE;

			if (iswrite) {
				if (!put_source_flags(ahwif->tx_chan,
					(void *)(page_address(sg->page)
						 + sg->offset),
					tc, flags)) {
					printk(KERN_ERR "%s failed %d\n",
					       __FUNCTION__, __LINE__);
				}
			} else {
				if (!put_dest_flags(ahwif->rx_chan,
					(void *)(page_address(sg->page)
						 + sg->offset),
					tc, flags)) {
					printk(KERN_ERR "%s failed %d\n",
					       __FUNCTION__, __LINE__);
				}
			}

			cur_addr += tc;
			cur_len -= tc;
		}
		sg++;
		i--;
	}

	if (count)
		return 1;

use_pio_instead:
	dma_unmap_sg(ahwif->dev,
		     hwif->sg_table,
		     hwif->sg_nents,
		     hwif->sg_dma_direction);

	return 0; /* revert to PIO for this request */
}

static int auide_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	_auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;

	if (hwif->sg_nents) {
		dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents,
			     hwif->sg_dma_direction);
		hwif->sg_nents = 0;
	}

	return 0;
}

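/*
 * auide_dma_setup() builds the descriptor chain and falls back to PIO if that
 * fails; auide_dma_start() is a no-op here, since the DBDMA channels are
 * started at init time and consume descriptors as they are queued.
 */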
static void auide_dma_start(ide_drive_t *drive)
{
}

static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr,
			    (2 * WAIT_CMD), NULL);
}

static int auide_dma_setup(ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;

	if (!auide_build_dmatable(drive)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	drive->waiting_for_dma = 1;
	return 0;
}

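/*
 * IDE core hook that decides whether DMA may be used for a drive: on first
 * use, initialize the DBDMA channels (sized according to the drive's
 * white-list status), then enable or disable DMA based on the black list.
 */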
static int auide_dma_check(ide_drive_t *drive)
{
	u8 speed = ide_max_dma_mode(drive);

	if (dbdma_init_done == 0) {
		auide_hwif.white_list = ide_in_drive_list(drive->id,
							  dma_white_list);
		auide_hwif.black_list = ide_in_drive_list(drive->id,
							  dma_black_list);
		auide_hwif.drive = drive;
		auide_ddma_init(&auide_hwif);
		dbdma_init_done = 1;
	}

	/* Is the drive in our DMA black list? */
	if (auide_hwif.black_list) {
		drive->using_dma = 0;

		/* Borrowed the warning message from ide-dma.c */
		printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
		       drive->name, drive->id->model);
	} else
		drive->using_dma = 1;

	if (drive->autodma && (speed & XFER_MODE) != XFER_PIO)
		return 0;

	return -1;
}

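/*
 * There is no DMA status register to poll here: the DBDMA completion
 * callbacks clear waiting_for_dma, which this routine also reuses as a
 * counter so the poll gives up after DMA_WAIT_TIMEOUT calls.
 */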
static int auide_dma_test_irq(ide_drive_t *drive)
{
	if (drive->waiting_for_dma == 0)
		printk(KERN_WARNING "%s: ide_dma_test_irq called while not waiting\n",
		       drive->name);

	/* If dbdma didn't execute the STOP command yet, the
	 * active bit is still set
	 */
	drive->waiting_for_dma++;
	if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
		printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
		       drive->name);
		return 1;
	}
	udelay(10);
	return 0;
}

static void auide_dma_host_on(ide_drive_t *drive)
{
}

static int auide_dma_on(ide_drive_t *drive)
{
	drive->using_dma = 1;

	return 0;
}

static void auide_dma_host_off(ide_drive_t *drive)
{
}

static void auide_dma_off_quietly(ide_drive_t *drive)
{
	drive->using_dma = 0;
}

static void auide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}

static void auide_ddma_tx_callback(int irq, void *param)
{
	_auide_hwif *ahwif = (_auide_hwif *)param;

	ahwif->drive->waiting_for_dma = 0;
}

static void auide_ddma_rx_callback(int irq, void *param)
{
	_auide_hwif *ahwif = (_auide_hwif *)param;

	ahwif->drive->waiting_for_dma = 0;
}

#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */

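/*
 * Fill in a dbdev_tab_t describing one endpoint of a DBDMA channel:
 * device id, ATA register block physical address, transfer size, bus width
 * and flags.
 */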
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
				 u32 devwidth, u32 flags)
{
	dev->dev_id = dev_id;
	dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
	dev->dev_intlevel = 0;
	dev->dev_intpolarity = 0;
	dev->dev_tsize = tsize;
	dev->dev_devwidth = devwidth;
	dev->dev_flags = flags;
}

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)

static void auide_dma_timeout(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

	if (hwif->ide_dma_test_irq(drive))
		return;

	hwif->ide_dma_end(drive);
}

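/*
 * Set up the DBDMA resources used for MWDMA: two device table entries plus a
 * target device, a TX and an RX channel with completion callbacks, descriptor
 * rings and the PRD table. Burst size and bus width are reduced for drives
 * that appear on neither the white nor the black list.
 */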
static int auide_ddma_init(_auide_hwif *auide)
{
	dbdev_tab_t source_dev_tab, target_dev_tab;
	u32 dev_id, tsize, devwidth, flags;
	ide_hwif_t *hwif = auide->hwif;

	dev_id = AU1XXX_ATA_DDMA_REQ;

	if (auide->white_list || auide->black_list) {
		tsize = 8;
		devwidth = 32;
	} else {
		tsize = 1;
		devwidth = 16;

		printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",
		       auide_hwif.drive->id->model);
		printk(KERN_ERR "            please read 'Documentation/mips/AU1xxx_IDE.README'");
	}

#ifdef IDE_AU1XXX_BURSTMODE
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev(&source_dev_tab, dev_id,
			     tsize, devwidth, DEV_FLAGS_OUT | flags);
	auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	auide_init_dbdma_dev(&source_dev_tab, dev_id,
			     tsize, devwidth, DEV_FLAGS_IN | flags);
	auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	/* We also need to add a target device for the DMA */
	auide_init_dbdma_dev(&target_dev_tab, (u32)DSCR_CMD0_ALWAYS,
			     tsize, devwidth, DEV_FLAGS_ANYUSE);
	auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
						 auide->tx_dev_id,
						 auide_ddma_tx_callback,
						 (void *)auide);

	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 auide->target_dev_id,
						 auide_ddma_rx_callback,
						 (void *)auide);

	auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							      NUM_DESCRIPTORS);
	auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							      NUM_DESCRIPTORS);

	hwif->dmatable_cpu = dma_alloc_coherent(auide->dev,
						PRD_ENTRIES * PRD_BYTES, /* 1 Page */
						&hwif->dmatable_dma, GFP_KERNEL);

	au1xxx_dbdma_start(auide->tx_chan);
	au1xxx_dbdma_start(auide->rx_chan);

	return 0;
}
#else

static int auide_ddma_init(_auide_hwif *auide)
{
	dbdev_tab_t source_dev_tab;
	int flags;

#ifdef IDE_AU1XXX_BURSTMODE
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS,
			     8, 32, DEV_FLAGS_OUT | flags);
	auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS,
			     8, 32, DEV_FLAGS_IN | flags);
	auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
						 auide->tx_dev_id,
						 NULL, (void *)auide);

	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 DSCR_CMD0_ALWAYS,
						 NULL, (void *)auide);

	auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							      NUM_DESCRIPTORS);
	auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							      NUM_DESCRIPTORS);

	au1xxx_dbdma_start(auide->tx_chan);
	au1xxx_dbdma_start(auide->rx_chan);

	return 0;
}
#endif

static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
	int i;
	unsigned long *ata_regs = hw->io_ports;

	/* FIXME? */
	for (i = 0; i < IDE_CONTROL_OFFSET; i++)
		*ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);

	/* set the Alternative Status register */
	*ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
}

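/*
 * Platform bus probe: claim and map the register block, fill in the hwif
 * (I/O ports, PIO/MWDMA masks, transfer-mode and DMA hooks) and register
 * the interface.
 */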
static int au_ide_probe(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	_auide_hwif *ahwif = &auide_hwif;
	ide_hwif_t *hwif;
	struct resource *res;
	hw_regs_t *hw;
	int ret = 0;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
	char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
	char *mode = "PIO+DDMA(offload)";
#endif

	memset(&auide_hwif, 0, sizeof(_auide_hwif));
	auide_hwif.dev = 0;

	ahwif->dev = dev;
	ahwif->irq = platform_get_irq(pdev, 0);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (res == NULL) {
		pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
		ret = -ENODEV;
		goto out;
	}
	if (ahwif->irq < 0) {
		pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
		ret = -ENODEV;
		goto out;
	}

	if (!request_mem_region(res->start, res->end - res->start, pdev->name)) {
		pr_debug("%s: request_mem_region failed\n", DRV_NAME);
		ret = -EBUSY;
		goto out;
	}

	ahwif->regbase = (u32)ioremap(res->start, res->end - res->start);
	if (ahwif->regbase == 0) {
		ret = -ENOMEM;
		goto out;
	}

	/* FIXME: This might possibly break PCMCIA IDE devices */

	hwif = &ide_hwifs[pdev->id];
	hw = &hwif->hw;
	hwif->irq = hw->irq = ahwif->irq;
	hwif->chipset = ide_au1xxx;

	auide_setup_ports(hw, ahwif);
	memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));

	hwif->ultra_mask = 0x0;		/* Disable Ultra DMA */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	hwif->mwdma_mask = 0x07;	/* Multimode-2 DMA */
	hwif->swdma_mask = 0x00;
#else
	hwif->mwdma_mask = 0x0;
	hwif->swdma_mask = 0x0;
#endif

	hwif->pio_mask = ATA_PIO4;

	hwif->noprobe = 0;
	hwif->drives[0].unmask = 1;
	hwif->drives[1].unmask = 1;

	/* hold should be on in all cases */
	hwif->hold = 1;

	hwif->mmio = 1;

	/* If the user has selected DDMA assisted copies,
	   then set up a few local I/O function entry points
	 */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
	hwif->INSW = auide_insw;
	hwif->OUTSW = auide_outsw;
#endif

	hwif->set_pio_mode = &au1xxx_set_pio_mode;
	hwif->speedproc = &auide_tune_chipset;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	hwif->dma_off_quietly = &auide_dma_off_quietly;
	hwif->dma_timeout = &auide_dma_timeout;

	hwif->ide_dma_check = &auide_dma_check;
	hwif->dma_exec_cmd = &auide_dma_exec_cmd;
	hwif->dma_start = &auide_dma_start;
	hwif->ide_dma_end = &auide_dma_end;
	hwif->dma_setup = &auide_dma_setup;
	hwif->ide_dma_test_irq = &auide_dma_test_irq;
	hwif->dma_host_off = &auide_dma_host_off;
	hwif->dma_host_on = &auide_dma_host_on;
	hwif->dma_lost_irq = &auide_dma_lost_irq;
	hwif->ide_dma_on = &auide_dma_on;

	hwif->autodma = 1;
	hwif->drives[0].autodma = hwif->autodma;
	hwif->drives[1].autodma = hwif->autodma;
	hwif->atapi_dma = 1;

#else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
	hwif->autodma = 0;
	hwif->channel = 0;
	hwif->hold = 1;
	hwif->select_data = 0;		/* no chipset-specific code */
	hwif->config_data = 0;		/* no chipset-specific code */

	hwif->drives[0].autodma = 0;
	hwif->drives[0].autotune = 1;	/* 1=autotune, 2=noautotune, 0=default */
#endif
	hwif->drives[0].no_io_32bit = 1;

	auide_hwif.hwif = hwif;
	hwif->hwif_data = &auide_hwif;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
	auide_ddma_init(&auide_hwif);
	dbdma_init_done = 1;
#endif

	probe_hwif_init(hwif);

	ide_proc_register_port(hwif);

	dev_set_drvdata(dev, hwif);

	printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);

 out:
	return ret;
}

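/* Tear down in reverse order: unregister the interface, unmap the registers
 * and release the memory region claimed in au_ide_probe(). */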
static int au_ide_remove(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	ide_hwif_t *hwif = dev_get_drvdata(dev);
	_auide_hwif *ahwif = &auide_hwif;

	ide_unregister(hwif - ide_hwifs);

	iounmap((void *)ahwif->regbase);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start);

	return 0;
}

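/* Bind to the "au1200-ide" platform device through a (legacy) struct
 * device_driver registered on the platform bus. */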
static struct device_driver au1200_ide_driver = {
	.name		= "au1200-ide",
	.bus		= &platform_bus_type,
	.probe		= au_ide_probe,
	.remove		= au_ide_remove,
};

static int __init au_ide_init(void)
{
	return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
	driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);