]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/ide/pci/scc_pata.c
ide: remove ->INW and ->OUTW methods
[mirror_ubuntu-bionic-kernel.git] / drivers / ide / pci / scc_pata.c
1 /*
2 * Support for IDE interfaces on Celleb platform
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This code is based on drivers/ide/pci/siimage.c:
7 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
8 * Copyright (C) 2003 Red Hat <alan@redhat.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 */
24
25 #include <linux/types.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include <linux/delay.h>
29 #include <linux/hdreg.h>
30 #include <linux/ide.h>
31 #include <linux/init.h>
32
/* PCI device ID of the SCC ATA function (vendor: TOSHIBA_2, see scc_pci_tbl) */
#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA		0x01b4

#define SCC_PATA_NAME           "scc IDE"

/* TDVHSEL register (ctl_base + 0x020): JCACTSEL select bits per device */
#define TDVHSEL_MASTER          0x00000001
#define TDVHSEL_SLAVE           0x00000004

#define MODE_JCUSFEN            0x00000080

/* CCKCTRL register bits (clock/reset control, ctl_base + 0xff0) */
#define CCKCTRL_ATARESET        0x00040000
#define CCKCTRL_BUFCNT          0x00020000
#define CCKCTRL_CRST            0x00010000
#define CCKCTRL_OCLKEN          0x00000100
#define CCKCTRL_ATACLKOEN       0x00000002	/* set => ATA clock at 133MHz */
#define CCKCTRL_LCLKEN          0x00000001

#define QCHCD_IOS_SS		0x00000001

#define QCHSD_STPDIAG		0x00020000

/* interrupt mask (INTMASK, dma_base + 0x010) and status (INTSTS, dma_base + 0x014) */
#define INTMASK_MSK             0xD1000012
#define INTSTS_SERROR		0x80000000
#define INTSTS_PRERR		0x40000000
#define INTSTS_RERR		0x10000000
#define INTSTS_ICERR		0x01000000
#define INTSTS_BMSINT		0x00000010
#define INTSTS_BMHE		0x00000008
#define INTSTS_IOIRQS		0x00000004
#define INTSTS_INTRQ		0x00000002
#define INTSTS_ACTEINT		0x00000001

#define ECMODE_VALUE 0x01

/* per-interface state, one slot per possible hwif */
static struct scc_ports {
	unsigned long ctl, dma;	/* ioremapped CTRL and BMID base addresses */
	ide_hwif_t *hwif;  /* for removing port from system */
} scc_ports[MAX_HWIFS];

/* PIO transfer mode table */
/* All timing tables below are indexed [clock][mode], where clock is
 * 0 for 100MHz and 1 for 133MHz operation (see CCKCTRL_ATACLKOEN). */
/* JCHST */
static unsigned long JCHSTtbl[2][7] = {
	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},   /* 100MHz */
	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}    /* 133MHz */
};

/* JCHHT */
static unsigned long JCHHTtbl[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},   /* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}    /* 133MHz */
};

/* JCHCT */
static unsigned long JCHCTtbl[2][7] = {
	{0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},   /* 100MHz */
	{0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}    /* 133MHz */
};


/* DMA transfer mode table */
/* JCHDCTM/JCHDCTS */
static unsigned long JCHDCTxtbl[2][7] = {
	{0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},   /* 100MHz */
	{0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}    /* 133MHz */
};

/* JCSTWTM/JCSTWTS */
static unsigned long JCSTWTxtbl[2][7] = {
	{0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},   /* 100MHz */
	{0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}    /* 133MHz */
};

/* JCTSS */
static unsigned long JCTSStbl[2][7] = {
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},   /* 100MHz */
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}    /* 133MHz */
};

/* JCENVT */
static unsigned long JCENVTtbl[2][7] = {
	{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},   /* 100MHz */
	{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}    /* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static unsigned long JCACTSELtbl[2][7] = {
	{0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},   /* 100MHz */
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}    /* 133MHz */
};
121
122
123 static u8 scc_ide_inb(unsigned long port)
124 {
125 u32 data = in_be32((void*)port);
126 return (u8)data;
127 }
128
129 static void scc_ide_insw(unsigned long port, void *addr, u32 count)
130 {
131 u16 *ptr = (u16 *)addr;
132 while (count--) {
133 *ptr++ = le16_to_cpu(in_be32((void*)port));
134 }
135 }
136
137 static void scc_ide_insl(unsigned long port, void *addr, u32 count)
138 {
139 u16 *ptr = (u16 *)addr;
140 while (count--) {
141 *ptr++ = le16_to_cpu(in_be32((void*)port));
142 *ptr++ = le16_to_cpu(in_be32((void*)port));
143 }
144 }
145
146 static void scc_ide_outb(u8 addr, unsigned long port)
147 {
148 out_be32((void*)port, addr);
149 }
150
/*
 * Write one byte to @port and make sure it has reached the device:
 * the dummy read from the BMID region flushes the posted write, and
 * the eieio() barriers keep the store and the flushing load ordered.
 */
static void
scc_ide_outbsync(ide_drive_t * drive, u8 addr, unsigned long port)
{
	ide_hwif_t *hwif = HWIF(drive);

	out_be32((void*)port, addr);
	eieio();
	/* dummy read: flush the preceding MMIO write */
	in_be32((void*)(hwif->dma_base + 0x01c));
	eieio();
}
161
162 static void
163 scc_ide_outsw(unsigned long port, void *addr, u32 count)
164 {
165 u16 *ptr = (u16 *)addr;
166 while (count--) {
167 out_be32((void*)port, cpu_to_le16(*ptr++));
168 }
169 }
170
171 static void
172 scc_ide_outsl(unsigned long port, void *addr, u32 count)
173 {
174 u16 *ptr = (u16 *)addr;
175 while (count--) {
176 out_be32((void*)port, cpu_to_le16(*ptr++));
177 out_be32((void*)port, cpu_to_le16(*ptr++));
178 }
179 }
180
181 /**
182 * scc_set_pio_mode - set host controller for PIO mode
183 * @drive: drive
184 * @pio: PIO mode number
185 *
186 * Load the timing settings for this device mode into the
187 * controller.
188 */
189
190 static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
191 {
192 ide_hwif_t *hwif = HWIF(drive);
193 struct scc_ports *ports = ide_get_hwifdata(hwif);
194 unsigned long ctl_base = ports->ctl;
195 unsigned long cckctrl_port = ctl_base + 0xff0;
196 unsigned long piosht_port = ctl_base + 0x000;
197 unsigned long pioct_port = ctl_base + 0x004;
198 unsigned long reg;
199 int offset;
200
201 reg = in_be32((void __iomem *)cckctrl_port);
202 if (reg & CCKCTRL_ATACLKOEN) {
203 offset = 1; /* 133MHz */
204 } else {
205 offset = 0; /* 100MHz */
206 }
207 reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
208 out_be32((void __iomem *)piosht_port, reg);
209 reg = JCHCTtbl[offset][pio];
210 out_be32((void __iomem *)pioct_port, reg);
211 }
212
213 /**
214 * scc_set_dma_mode - set host controller for DMA mode
215 * @drive: drive
216 * @speed: DMA mode
217 *
218 * Load the timing settings for this device mode into the
219 * controller.
220 */
221
222 static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
223 {
224 ide_hwif_t *hwif = HWIF(drive);
225 struct scc_ports *ports = ide_get_hwifdata(hwif);
226 unsigned long ctl_base = ports->ctl;
227 unsigned long cckctrl_port = ctl_base + 0xff0;
228 unsigned long mdmact_port = ctl_base + 0x008;
229 unsigned long mcrcst_port = ctl_base + 0x00c;
230 unsigned long sdmact_port = ctl_base + 0x010;
231 unsigned long scrcst_port = ctl_base + 0x014;
232 unsigned long udenvt_port = ctl_base + 0x018;
233 unsigned long tdvhsel_port = ctl_base + 0x020;
234 int is_slave = (&hwif->drives[1] == drive);
235 int offset, idx;
236 unsigned long reg;
237 unsigned long jcactsel;
238
239 reg = in_be32((void __iomem *)cckctrl_port);
240 if (reg & CCKCTRL_ATACLKOEN) {
241 offset = 1; /* 133MHz */
242 } else {
243 offset = 0; /* 100MHz */
244 }
245
246 idx = speed - XFER_UDMA_0;
247
248 jcactsel = JCACTSELtbl[offset][idx];
249 if (is_slave) {
250 out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
251 out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
252 jcactsel = jcactsel << 2;
253 out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
254 } else {
255 out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
256 out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
257 out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
258 }
259 reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
260 out_be32((void __iomem *)udenvt_port, reg);
261 }
262
263 /**
264 * scc_ide_dma_setup - begin a DMA phase
265 * @drive: target device
266 *
267 * Build an IDE DMA PRD (IDE speak for scatter gather table)
268 * and then set up the DMA transfer registers.
269 *
270 * Returns 0 on success. If a PIO fallback is required then 1
271 * is returned.
272 */
273
274 static int scc_dma_setup(ide_drive_t *drive)
275 {
276 ide_hwif_t *hwif = drive->hwif;
277 struct request *rq = HWGROUP(drive)->rq;
278 unsigned int reading;
279 u8 dma_stat;
280
281 if (rq_data_dir(rq))
282 reading = 0;
283 else
284 reading = 1 << 3;
285
286 /* fall back to pio! */
287 if (!ide_build_dmatable(drive, rq)) {
288 ide_map_sg(drive, rq);
289 return 1;
290 }
291
292 /* PRD table */
293 out_be32((void __iomem *)hwif->dma_prdtable, hwif->dmatable_dma);
294
295 /* specify r/w */
296 out_be32((void __iomem *)hwif->dma_command, reading);
297
298 /* read dma_status for INTR & ERROR flags */
299 dma_stat = in_be32((void __iomem *)hwif->dma_status);
300
301 /* clear INTR & ERROR flags */
302 out_be32((void __iomem *)hwif->dma_status, dma_stat|6);
303 drive->waiting_for_dma = 1;
304 return 0;
305 }
306
307
308 /**
309 * scc_dma_end - Stop DMA
310 * @drive: IDE drive
311 *
312 * Check and clear INT Status register.
313 * Then call __ide_dma_end().
314 */
315
316 static int scc_dma_end(ide_drive_t *drive)
317 {
318 ide_hwif_t *hwif = HWIF(drive);
319 unsigned long intsts_port = hwif->dma_base + 0x014;
320 u32 reg;
321 int dma_stat, data_loss = 0;
322 static int retry = 0;
323
324 /* errata A308 workaround: Step5 (check data loss) */
325 /* We don't check non ide_disk because it is limited to UDMA4 */
326 if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
327 & ERR_STAT) &&
328 drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
329 reg = in_be32((void __iomem *)intsts_port);
330 if (!(reg & INTSTS_ACTEINT)) {
331 printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
332 drive->name);
333 data_loss = 1;
334 if (retry++) {
335 struct request *rq = HWGROUP(drive)->rq;
336 int unit;
337 /* ERROR_RESET and drive->crc_count are needed
338 * to reduce DMA transfer mode in retry process.
339 */
340 if (rq)
341 rq->errors |= ERROR_RESET;
342 for (unit = 0; unit < MAX_DRIVES; unit++) {
343 ide_drive_t *drive = &hwif->drives[unit];
344 drive->crc_count++;
345 }
346 }
347 }
348 }
349
350 while (1) {
351 reg = in_be32((void __iomem *)intsts_port);
352
353 if (reg & INTSTS_SERROR) {
354 printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
355 out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
356
357 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
358 continue;
359 }
360
361 if (reg & INTSTS_PRERR) {
362 u32 maea0, maec0;
363 unsigned long ctl_base = hwif->config_data;
364
365 maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
366 maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));
367
368 printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);
369
370 out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
371
372 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
373 continue;
374 }
375
376 if (reg & INTSTS_RERR) {
377 printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
378 out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
379
380 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
381 continue;
382 }
383
384 if (reg & INTSTS_ICERR) {
385 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
386
387 printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
388 out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
389 continue;
390 }
391
392 if (reg & INTSTS_BMSINT) {
393 printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
394 out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);
395
396 ide_do_reset(drive);
397 continue;
398 }
399
400 if (reg & INTSTS_BMHE) {
401 out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
402 continue;
403 }
404
405 if (reg & INTSTS_ACTEINT) {
406 out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
407 continue;
408 }
409
410 if (reg & INTSTS_IOIRQS) {
411 out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
412 continue;
413 }
414 break;
415 }
416
417 dma_stat = __ide_dma_end(drive);
418 if (data_loss)
419 dma_stat |= 2; /* emulate DMA error (to retry command) */
420 return dma_stat;
421 }
422
423 /* returns 1 if dma irq issued, 0 otherwise */
424 static int scc_dma_test_irq(ide_drive_t *drive)
425 {
426 ide_hwif_t *hwif = HWIF(drive);
427 u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
428
429 /* SCC errata A252,A308 workaround: Step4 */
430 if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
431 & ERR_STAT) &&
432 (int_stat & INTSTS_INTRQ))
433 return 1;
434
435 /* SCC errata A308 workaround: Step5 (polling IOIRQS) */
436 if (int_stat & INTSTS_IOIRQS)
437 return 1;
438
439 if (!drive->waiting_for_dma)
440 printk(KERN_WARNING "%s: (%s) called while not waiting\n",
441 drive->name, __func__);
442 return 0;
443 }
444
445 static u8 scc_udma_filter(ide_drive_t *drive)
446 {
447 ide_hwif_t *hwif = drive->hwif;
448 u8 mask = hwif->ultra_mask;
449
450 /* errata A308 workaround: limit non ide_disk drive to UDMA4 */
451 if ((drive->media != ide_disk) && (mask & 0xE0)) {
452 printk(KERN_INFO "%s: limit %s to UDMA4\n",
453 SCC_PATA_NAME, drive->name);
454 mask = ATA_UDMA4;
455 }
456
457 return mask;
458 }
459
460 /**
461 * setup_mmio_scc - map CTRL/BMID region
462 * @dev: PCI device we are configuring
463 * @name: device name
464 *
465 */
466
467 static int setup_mmio_scc (struct pci_dev *dev, const char *name)
468 {
469 unsigned long ctl_base = pci_resource_start(dev, 0);
470 unsigned long dma_base = pci_resource_start(dev, 1);
471 unsigned long ctl_size = pci_resource_len(dev, 0);
472 unsigned long dma_size = pci_resource_len(dev, 1);
473 void __iomem *ctl_addr;
474 void __iomem *dma_addr;
475 int i, ret;
476
477 for (i = 0; i < MAX_HWIFS; i++) {
478 if (scc_ports[i].ctl == 0)
479 break;
480 }
481 if (i >= MAX_HWIFS)
482 return -ENOMEM;
483
484 ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
485 if (ret < 0) {
486 printk(KERN_ERR "%s: can't reserve resources\n", name);
487 return ret;
488 }
489
490 if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
491 goto fail_0;
492
493 if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
494 goto fail_1;
495
496 pci_set_master(dev);
497 scc_ports[i].ctl = (unsigned long)ctl_addr;
498 scc_ports[i].dma = (unsigned long)dma_addr;
499 pci_set_drvdata(dev, (void *) &scc_ports[i]);
500
501 return 1;
502
503 fail_1:
504 iounmap(ctl_addr);
505 fail_0:
506 return -ENOMEM;
507 }
508
/*
 * Find a free hwif, describe the 9 taskfile registers (spaced 4 bytes
 * apart in the BMID region, starting at offset 0x20) and register the
 * port with the IDE core.  Returns 0 on success, -ENOMEM when no hwif
 * slot is available.
 */
static int scc_ide_setup_pci_device(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	ide_hwif_t *hwif = NULL;
	hw_regs_t hw;
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
	int i;

	hwif = ide_find_port();
	if (hwif == NULL) {
		printk(KERN_ERR "%s: too many IDE interfaces, "
				"no room in table\n", SCC_PATA_NAME);
		return -ENOMEM;
	}

	memset(&hw, 0, sizeof(hw));
	/* i <= 8: all 9 taskfile/control registers, 4 bytes apart */
	for (i = 0; i <= 8; i++)
		hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
	hw.irq = dev->irq;
	hw.dev = &dev->dev;
	hw.chipset = ide_pci;
	ide_init_port_hw(hwif, &hw);
	hwif->dev = &dev->dev;

	idx[0] = hwif->index;

	ide_device_add(idx, d);

	return 0;
}
540
541 /**
542 * init_setup_scc - set up an SCC PATA Controller
543 * @dev: PCI device
544 * @d: IDE port info
545 *
546 * Perform the initial set up for this device.
547 */
548
549 static int __devinit init_setup_scc(struct pci_dev *dev,
550 const struct ide_port_info *d)
551 {
552 unsigned long ctl_base;
553 unsigned long dma_base;
554 unsigned long cckctrl_port;
555 unsigned long intmask_port;
556 unsigned long mode_port;
557 unsigned long ecmode_port;
558 unsigned long dma_status_port;
559 u32 reg = 0;
560 struct scc_ports *ports;
561 int rc;
562
563 rc = pci_enable_device(dev);
564 if (rc)
565 goto end;
566
567 rc = setup_mmio_scc(dev, d->name);
568 if (rc < 0)
569 goto end;
570
571 ports = pci_get_drvdata(dev);
572 ctl_base = ports->ctl;
573 dma_base = ports->dma;
574 cckctrl_port = ctl_base + 0xff0;
575 intmask_port = dma_base + 0x010;
576 mode_port = ctl_base + 0x024;
577 ecmode_port = ctl_base + 0xf00;
578 dma_status_port = dma_base + 0x004;
579
580 /* controller initialization */
581 reg = 0;
582 out_be32((void*)cckctrl_port, reg);
583 reg |= CCKCTRL_ATACLKOEN;
584 out_be32((void*)cckctrl_port, reg);
585 reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
586 out_be32((void*)cckctrl_port, reg);
587 reg |= CCKCTRL_CRST;
588 out_be32((void*)cckctrl_port, reg);
589
590 for (;;) {
591 reg = in_be32((void*)cckctrl_port);
592 if (reg & CCKCTRL_CRST)
593 break;
594 udelay(5000);
595 }
596
597 reg |= CCKCTRL_ATARESET;
598 out_be32((void*)cckctrl_port, reg);
599
600 out_be32((void*)ecmode_port, ECMODE_VALUE);
601 out_be32((void*)mode_port, MODE_JCUSFEN);
602 out_be32((void*)intmask_port, INTMASK_MSK);
603
604 rc = scc_ide_setup_pci_device(dev, d);
605
606 end:
607 return rc;
608 }
609
/*
 * scc_tf_load - write a taskfile to the device registers
 * @drive: target device
 * @task: taskfile to load
 *
 * Writes the HOB (high order byte) registers first, then the standard
 * registers, and finally the device select register, using the SCC
 * MMIO byte accessors.
 */
static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;
	/* mask applied to the device register below */
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

	ide_set_irq(drive, 1);

	/* data register is 16 bits wide: hob_data occupies the high byte */
	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
		out_be32((void *)io_ports->data_addr,
			 (tf->hob_data << 8) | tf->data);

	/* HOB registers are written before the standard set */
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		scc_ide_outb(tf->feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		scc_ide_outb(tf->nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		scc_ide_outb(tf->lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		scc_ide_outb(tf->lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		scc_ide_outb(tf->lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
		scc_ide_outb((tf->device & HIHI) | drive->select.all,
			     io_ports->device_addr);
}
651
/*
 * scc_tf_read - read the taskfile back from the device registers
 * @drive: target device
 * @task: taskfile to fill in
 *
 * Reads the standard registers first (with HOB access disabled via the
 * control register), then re-enables HOB access to read the high order
 * bytes for LBA48 commands.
 */
static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;

	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
		u16 data = (u16)in_be32((void *)io_ports->data_addr);

		tf->data = data & 0xff;
		tf->hob_data = (data >> 8) & 0xff;
	}

	/* be sure we're looking at the low order bits */
	scc_ide_outb(drive->ctl & ~0x80, io_ports->ctl_addr);

	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
		tf->nsect = scc_ide_inb(io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
		tf->lbal = scc_ide_inb(io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
		tf->lbam = scc_ide_inb(io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
		tf->lbah = scc_ide_inb(io_ports->lbah_addr);
	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
		tf->device = scc_ide_inb(io_ports->device_addr);

	if (task->tf_flags & IDE_TFLAG_LBA48) {
		/* ctl bit 0x80 selects the HOB register set */
		scc_ide_outb(drive->ctl | 0x80, io_ports->ctl_addr);

		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
			tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
			tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
			tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
			tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
			tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr);
	}
}
693
/*
 * scc_input_data - PIO read of @len bytes into @buf
 *
 * NOTE(review): len++ appears intended to round an odd byte count up
 * so the final byte is still read as part of a 16-bit word (integer
 * division leaves even counts unchanged) -- confirm against callers.
 */
static void scc_input_data(ide_drive_t *drive, struct request *rq,
			   void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		/* bulk 32-bit reads, then one 16-bit read for a leftover word */
		scc_ide_insl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_insw(data_addr, buf, len / 2);
}
709
/*
 * scc_output_data - PIO write of @len bytes from @buf
 *
 * Mirror of scc_input_data(); the same len++ rounding applies (odd
 * byte counts are rounded up to a full 16-bit word).
 */
static void scc_output_data(ide_drive_t *drive,  struct request *rq,
			    void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		/* bulk 32-bit writes, then one 16-bit write for a leftover word */
		scc_ide_outsl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_outsw(data_addr, buf, len / 2);
}
725
726 /**
727 * init_mmio_iops_scc - set up the iops for MMIO
728 * @hwif: interface to set up
729 *
730 */
731
732 static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
733 {
734 struct pci_dev *dev = to_pci_dev(hwif->dev);
735 struct scc_ports *ports = pci_get_drvdata(dev);
736 unsigned long dma_base = ports->dma;
737
738 ide_set_hwifdata(hwif, ports);
739
740 hwif->tf_load = scc_tf_load;
741 hwif->tf_read = scc_tf_read;
742
743 hwif->input_data = scc_input_data;
744 hwif->output_data = scc_output_data;
745
746 hwif->INB = scc_ide_inb;
747 hwif->OUTB = scc_ide_outb;
748 hwif->OUTBSYNC = scc_ide_outbsync;
749
750 hwif->dma_base = dma_base;
751 hwif->config_data = ports->ctl;
752 hwif->mmio = 1;
753 }
754
755 /**
756 * init_iops_scc - set up iops
757 * @hwif: interface to set up
758 *
759 * Do the basic setup for the SCC hardware interface
760 * and then do the MMIO setup.
761 */
762
763 static void __devinit init_iops_scc(ide_hwif_t *hwif)
764 {
765 struct pci_dev *dev = to_pci_dev(hwif->dev);
766
767 hwif->hwif_data = NULL;
768 if (pci_get_drvdata(dev) == NULL)
769 return;
770 init_mmio_iops_scc(hwif);
771 }
772
/* Cable detection: the SCC port always reports an 80-wire cable. */
static u8 __devinit scc_cable_detect(ide_hwif_t *hwif)
{
	return ATA_CBL_PATA80;
}
777
778 /**
779 * init_hwif_scc - set up hwif
780 * @hwif: interface to set up
781 *
782 * We do the basic set up of the interface structure. The SCC
783 * requires several custom handlers so we override the default
784 * ide DMA handlers appropriately.
785 */
786
787 static void __devinit init_hwif_scc(ide_hwif_t *hwif)
788 {
789 struct scc_ports *ports = ide_get_hwifdata(hwif);
790
791 ports->hwif = hwif;
792
793 hwif->dma_command = hwif->dma_base;
794 hwif->dma_status = hwif->dma_base + 0x04;
795 hwif->dma_prdtable = hwif->dma_base + 0x08;
796
797 /* PTERADD */
798 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
799
800 if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
801 hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
802 else
803 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
804 }
805
/* timing, filtering and cable-detect hooks used by the IDE core */
static const struct ide_port_ops scc_port_ops = {
	.set_pio_mode		= scc_set_pio_mode,
	.set_dma_mode		= scc_set_dma_mode,
	.udma_filter		= scc_udma_filter,
	.cable_detect		= scc_cable_detect,
};

/* DMA engine hooks: SCC-specific setup/end/test_irq, generic otherwise */
static const struct ide_dma_ops scc_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= scc_dma_setup,
	.dma_exec_cmd		= ide_dma_exec_cmd,
	.dma_start		= ide_dma_start,
	.dma_end		= scc_dma_end,
	.dma_test_irq		= scc_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timeout		= ide_dma_timeout,
};
823
/* port info template: single-port controller, PIO0-4, custom iops/hwif init */
#define DECLARE_SCC_DEV(name_str)			\
  {							\
      .name		= name_str,			\
      .init_iops	= init_iops_scc,		\
      .init_hwif	= init_hwif_scc,		\
      .port_ops		= &scc_port_ops,		\
      .dma_ops		= &scc_dma_ops,			\
      .host_flags	= IDE_HFLAG_SINGLE,		\
      .pio_mask		= ATA_PIO4,			\
  }

static const struct ide_port_info scc_chipsets[] __devinitdata = {
	/* 0 */ DECLARE_SCC_DEV("sccIDE"),
};
838
/**
 *	scc_init_one	-	pci layer discovery entry
 *	@dev: PCI device
 *	@id: ident table entry
 *
 *	Called by the PCI code when it finds an SCC PATA controller.
 *	We then use the IDE PCI generic helper to do most of the work.
 */

static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	/* driver_data indexes scc_chipsets[] (currently only entry 0) */
	return init_setup_scc(dev, &scc_chipsets[id->driver_data]);
}
852
853 /**
854 * scc_remove - pci layer remove entry
855 * @dev: PCI device
856 *
857 * Called by the PCI code when it removes an SCC PATA controller.
858 */
859
860 static void __devexit scc_remove(struct pci_dev *dev)
861 {
862 struct scc_ports *ports = pci_get_drvdata(dev);
863 ide_hwif_t *hwif = ports->hwif;
864
865 if (hwif->dmatable_cpu) {
866 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
867 hwif->dmatable_cpu, hwif->dmatable_dma);
868 hwif->dmatable_cpu = NULL;
869 }
870
871 ide_unregister(hwif);
872
873 iounmap((void*)ports->dma);
874 iounmap((void*)ports->ctl);
875 pci_release_selected_regions(dev, (1 << 2) - 1);
876 memset(ports, 0, sizeof(*ports));
877 }
878
/* PCI IDs this driver binds to; driver_data is the scc_chipsets[] index */
static const struct pci_device_id scc_pci_tbl[] = {
	{ PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, scc_pci_tbl);

static struct pci_driver driver = {
	.name = "SCC IDE",
	.id_table = scc_pci_tbl,
	.probe = scc_init_one,
	.remove = scc_remove,
};
891
/* module entry point: register with the IDE PCI layer */
static int scc_ide_init(void)
{
	return ide_pci_register_driver(&driver);
}

module_init(scc_ide_init);
/*
 * NOTE(review): there is deliberately no module_exit(), so this module
 * cannot be unloaded cleanly.  Presumably the IDE PCI layer of this
 * kernel does not support driver unregistration -- confirm before
 * enabling the exit path below:
 *
 * static void scc_ide_exit(void)
 * {
 *	ide_pci_unregister_driver(&driver);
 * }
 * module_exit(scc_ide_exit);
 */
905
906
907 MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
908 MODULE_LICENSE("GPL");