/*
 * sata_sil.c - Silicon Image SATA
 *
 * Maintained by:  Tejun Heo <tj@kernel.org>
 *		   Please ALWAYS copy linux-ide@vger.kernel.org
 *		   on emails.
 *
 * Copyright 2003-2005 Red Hat, Inc.
 * Copyright 2003 Benjamin Herrenschmidt
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * Documentation for SiI 3112:
 * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
 *
 * Other errata and documentation available under NDA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>

#define DRV_NAME	"sata_sil"
#define DRV_VERSION	"2.4"

#define SIL_DMA_BOUNDARY	0x7fffffffUL

enum {
	SIL_MMIO_BAR		= 5,

	/*
	 * host flags
	 */
	SIL_FLAG_NO_SATA_IRQ	= (1 << 28),
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE	= (1 << 30),

	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA,

	/*
	 * Controller IDs
	 */
	sil_3112		= 0,
	sil_3112_no_sata_irq	= 1,
	sil_3512		= 2,
	sil_3114		= 3,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG		= 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG */
	SIL_MASK_IDE0_INT	= (1 << 22),
	SIL_MASK_IDE1_INT	= (1 << 23),
	SIL_MASK_IDE2_INT	= (1 << 24),
	SIL_MASK_IDE3_INT	= (1 << 25),
	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT		= SIL_MASK_2PORT |
				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING	= (1 << 1),

	SIL_DMA_ENABLE		= (1 << 0),  /* DMA run switch */
	SIL_DMA_RDWR		= (1 << 3),  /* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ	= (1 << 4),  /* OR of all SATA IRQs */
	SIL_DMA_ACTIVE		= (1 << 16), /* DMA running */
	SIL_DMA_ERROR		= (1 << 17), /* PCI bus error */
	SIL_DMA_COMPLETE	= (1 << 18), /* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ	= (1 << 6),  /* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE	= (1 << 24), /* ACTIVE for the next channel */
	SIL_DMA_N_ERROR		= (1 << 25), /* ERROR for the next channel */
	SIL_DMA_N_COMPLETE	= (1 << 26), /* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N		= (1 << 16), /* triggered by SError.N */

	/*
	 * Others
	 */
	SIL_QUIRK_MOD15WRITE	= (1 << 0),
	SIL_QUIRK_UDMA5MAX	= (1 << 1),
};

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static void sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);


static const struct pci_device_id sil_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

	{ }	/* terminate list */
};


/* TODO firmware versions should be added - eric */
static const struct sil_drivelist {
	const char *product;
	unsigned int quirk;
} sil_blacklist [] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "TOSHIBA MK2561GSYN",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }
};

static struct pci_driver sil_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= sil_pci_device_resume,
#endif
};

static struct scsi_host_template sil_sht = {
	ATA_BASE_SHT(DRV_NAME),
	/** These controllers support Large Block Transfer which allows
	    transfer chunks up to 2GB and which cross 64KB boundaries,
	    therefore the DMA limits are more relaxed than standard ATA SFF. */
	.dma_boundary		= SIL_DMA_BOUNDARY,
	.sg_tablesize		= ATA_MAX_PRD
};

static struct ata_port_operations sil_ops = {
	.inherits		= &ata_bmdma32_port_ops,
	.dev_config		= sil_dev_config,
	.set_mode		= sil_set_mode,
	.bmdma_setup		= sil_bmdma_setup,
	.bmdma_start		= sil_bmdma_start,
	.bmdma_stop		= sil_bmdma_stop,
	.qc_prep		= sil_qc_prep,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
};

static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
				  SIL_FLAG_NO_SATA_IRQ,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
};

/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
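/* (ports 2 and 3 mirror ports 0 and 1 at a fixed +0x200 offset in every block) */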
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	/*   tf    ctl  bmdma  bmdma2  fifo    scr   sien   mode   sfis */
	{  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
	{  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int slow_down;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");


static void sil_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;

	/* clear start/stop bit - can safely always write 0 */
	iowrite8(0, bmdma2);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}

static void sil_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *bmdma = ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

static void sil_bmdma_start(struct ata_queued_cmd *qc)
{
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
	u8 dmactl = ATA_DMA_START;

	/* set transfer direction, start host DMA transaction
	   Note: For Large Block Transfer to work, the DMA must be started
	   using the bmdma2 register. */
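	/* ATA_DMA_WR makes the bus master write to host memory, i.e. the
	 * device-to-host (read) direction; leaving it clear selects a
	 * host-to-device (write) transfer.
	 */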
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, bmdma2);
}

/* The way God intended PCI IDE scatter/gather lists to look and behave... */
static void sil_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd, *last_prd = NULL;
	unsigned int si;

	prd = &ap->bmdma_prd[0];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		/* Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		u32 addr = (u32) sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		prd->addr = cpu_to_le32(addr);
		prd->flags_len = cpu_to_le32(sg_len);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);

		last_prd = prd;
		prd++;
	}

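	/* mark the final entry so the controller knows where the PRD
	 * table ends
	 */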
	if (likely(last_prd))
		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static void sil_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	sil_fill_sg(qc);
}

static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
{
	u8 cache_line = 0;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
	return cache_line;
}

/**
 *	sil_set_mode - wrap set_mode functions
 *	@link: link to set up
 *	@r_failed: returned device when we fail
 *
 *	Wrap the libata method for device setup as after the setup we need
 *	to inspect the results and do some configuration work
 */

static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
	struct ata_port *ap = link->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
	struct ata_device *dev;
	u32 tmp, dev_mode[2] = { };
	int rc;

	rc = ata_do_set_mode(link, r_failed);
	if (rc)
		return rc;

	ata_for_each_dev(dev, link, ALL) {
		if (!ata_dev_enabled(dev))
			dev_mode[dev->devno] = 0;	/* PIO0/1/2 */
		else if (dev->flags & ATA_DFLAG_PIO)
			dev_mode[dev->devno] = 1;	/* PIO3/4 */
		else
			dev_mode[dev->devno] = 3;	/* UDMA */
		/* value 2 indicates MDMA */
	}

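	/* program the per-port transfer mode register: bits 1:0 select the
	 * mode for device 0 and bits 5:4 the mode for device 1, using the
	 * values noted above; all other bits are left untouched
	 */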
	tmp = readl(addr);
	tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
	tmp |= dev_mode[0];
	tmp |= (dev_mode[1] << 4);
	writel(tmp, addr);
	readl(addr);	/* flush */
	return 0;
}

static inline void __iomem *sil_scr_addr(struct ata_port *ap,
					 unsigned int sc_reg)
{
	void __iomem *offset = ap->ioaddr.scr_addr;

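	/* SCR block layout on this controller: SControl at +0,
	 * SStatus at +4, SError at +8
	 */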
	switch (sc_reg) {
	case SCR_STATUS:
		return offset + 4;
	case SCR_ERROR:
		return offset + 8;
	case SCR_CONTROL:
		return offset;
	default:
		/* do nothing */
		break;
	}

	return NULL;
}

static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

	if (mmio) {
		*val = readl(mmio);
		return 0;
	}
	return -EINVAL;
}

static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

	if (mmio) {
		writel(val, mmio);
		return 0;
	}
	return -EINVAL;
}

static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror = 0xffffffff;

		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
		 * controllers continue to assert IRQ as long as
		 * SError bits are pending.  Clear SError immediately.
		 */
		sil_scr_read(&ap->link, SCR_ERROR, &serror);
		sil_scr_write(&ap->link, SCR_ERROR, serror);

		/* Sometimes spurious interrupts occur, double check
		 * it's PHYRDY CHG.
		 */
		if (serror & SERR_PHYRDY_CHG) {
			ap->link.eh_info.serror |= serror;
			goto freeze;
		}

		if (!(bmdma2 & SIL_DMA_COMPLETE))
			return;
	}

	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* this sometimes happens, just clear IRQ */
		ap->ops->sff_check_status(ap);
		return;
	}

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (ata_is_dma(qc->tf.protocol)) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ap->ops->sff_check_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* kick HSM in the ass */
	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

	return;

 err_hsm:
	qc->err_mask |= AC_ERR_HSM;
 freeze:
	ata_port_freeze(ap);
}

static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	int handled = 0;
	int i;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

		/* turn off SATA_IRQ if not supported */
		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
			bmdma2 &= ~SIL_DMA_SATA_IRQ;

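		/* an all-ones readback most likely means the MMIO read
		 * failed (e.g. the controller is gone); either way there
		 * is nothing to service on this port
		 */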
		if (bmdma2 == 0xffffffff ||
		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
			continue;

		sil_host_intr(ap, bmdma2);
		handled = 1;
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void sil_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
	writel(0, mmio_base + sil_port[ap->port_no].sien);

	/* plug IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */

	/* Ensure DMA_ENABLE is off.
	 *
	 * This is because the controller will not give us access to the
	 * taskfile registers while a DMA is in progress
	 */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
		 ap->ioaddr.bmdma_addr);

	/* According to ata_bmdma_stop, an HDMA transition requires
	 * one PIO cycle. But we can't read a taskfile register.
	 */
	ioread8(ap->ioaddr.bmdma_addr);
}

static void sil_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* clear IRQ */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);

	/* turn on SATA IRQ if supported */
	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

	/* turn on IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
	writel(tmp, mmio_base + SIL_SYSCFG);
}

/**
 *	sil_dev_config - Apply device/host-specific errata fixups
 *	@dev: Device to be examined
 *
 *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
 *	device is known to be present, this function is called.
 *	We apply two errata fixups which are specific to Silicon Image,
 *	a Seagate and a Maxtor fixup.
 *
 *	For certain Seagate devices, we must limit the maximum sectors
 *	to under 8K.
 *
 *	For certain Maxtor devices, we must not program the drive
 *	beyond udma5.
 *
 *	Both fixups are unfairly pessimistic.  As soon as I get more
 *	information on these errata, I will create a more exhaustive
 *	list, and apply the fixups to only the specific
 *	devices/hosts/firmwares that need it.
 *
 *	20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
 *	The Maxtor quirk is in the blacklist, but I'm keeping the original
 *	pessimistic fix for the following reasons...
 *	- There seems to be less info on it, only one device gleaned off the
 *	  Windows driver, maybe only one is affected.  More info would be greatly
 *	  appreciated.
 *	- But then again UDMA5 is hardly anything to complain about
 */
static void sil_dev_config(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
	unsigned int n, quirks = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	/* This controller doesn't support trim */
	dev->horkage |= ATA_HORKAGE_NOTRIM;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (n = 0; sil_blacklist[n].product; n++)
		if (!strcmp(sil_blacklist[n].product, model_num)) {
			quirks = sil_blacklist[n].quirk;
			break;
		}

	/* limit requests to 15 sectors */
	if (slow_down ||
	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
	     (quirks & SIL_QUIRK_MOD15WRITE))) {
		if (print_info)
			ata_dev_info(dev,
		"applying Seagate errata fix (mod15write workaround)\n");
		dev->max_sectors = 15;
		return;
	}

	/* limit to udma5 */
	if (quirks & SIL_QUIRK_UDMA5MAX) {
		if (print_info)
			ata_dev_info(dev, "applying Maxtor errata fix %s\n",
				     model_num);
		dev->udma_mask &= ATA_UDMA5;
		return;
	}
}

static void sil_init_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	u8 cls;
	u32 tmp;
	int i;

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;  /* cls = (line_size/8)+1 */
		for (i = 0; i < host->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_warn(&pdev->dev,
			 "cache line size not set.  Driver may not function\n");

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < host->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_info(&pdev->dev,
					 "Applying R_ERR on DMA activate FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (host->n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}

static bool sil_broken_system_poweroff(struct pci_dev *pdev)
{
	static const struct dmi_system_id broken_systems[] = {
		{
			.ident = "HP Compaq nx6325",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
			},
			/* PCI slot number of the controller */
			.driver_data = (void *)0x12UL,
		},

		{ }	/* terminate list */
	};
	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);

	if (dmi) {
		unsigned long slot = (unsigned long)dmi->driver_data;
		/* apply the quirk only to on-board controllers */
		return slot == PCI_SLOT(pdev->devfn);
	}

	return false;
}

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int board_id = ent->driver_data;
	struct ata_port_info pi = sil_port_info[board_id];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int n_ports, rc;
	unsigned int i;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	n_ports = 2;
	if (board_id == sil_3114)
		n_ports = 4;

	if (sil_broken_system_poweroff(pdev)) {
		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
					ATA_FLAG_NO_HIBERNATE_SPINDOWN;
		dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
				"on poweroff and hibernation\n");
	}

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	mmio_base = host->iomap[SIL_MMIO_BAR];

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
		ata_sff_std_ports(ioaddr);

		ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
	}

	/* initialize and activate */
	sil_init_controller(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
				 &sil_sht);
}

#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	sil_init_controller(host);
	ata_host_resume(host);

	return 0;
}
#endif

module_pci_driver(sil_pci_driver);