// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
 *
 * Copyright 2005 Tejun Heo
 *
 * Based on preview driver from Silicon Image.
 */
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/gfp.h>
13 #include <linux/pci.h>
14 #include <linux/blkdev.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/device.h>
19 #include <scsi/scsi_host.h>
20 #include <scsi/scsi_cmnd.h>
21 #include <linux/libata.h>
23 #define DRV_NAME "sata_sil24"
24 #define DRV_VERSION "1.1"
27 * Port request block (PRB) 32 bytes
37 * Scatter gather entry (SGE) 16 bytes
50 /* sil24 fetches in chunks of 64bytes. The first block
51 * contains the PRB and two SGEs. From the second block, it's
52 * consisted of four SGEs and called SGT. Calculate the
53 * number of SGTs that fit into one page.
55 SIL24_PRB_SZ
= sizeof(struct sil24_prb
)
56 + 2 * sizeof(struct sil24_sge
),
57 SIL24_MAX_SGT
= (PAGE_SIZE
- SIL24_PRB_SZ
)
58 / (4 * sizeof(struct sil24_sge
)),
60 /* This will give us one unused SGEs for ATA. This extra SGE
61 * will be used to store CDB for ATAPI devices.
63 SIL24_MAX_SGE
= 4 * SIL24_MAX_SGT
+ 1,
66 * Global controller registers (128 bytes @ BAR0)
69 HOST_SLOT_STAT
= 0x00, /* 32 bit slot stat * 4 */
73 HOST_BIST_CTRL
= 0x50,
74 HOST_BIST_PTRN
= 0x54,
75 HOST_BIST_STAT
= 0x58,
76 HOST_MEM_BIST_STAT
= 0x5c,
77 HOST_FLASH_CMD
= 0x70,
79 HOST_FLASH_DATA
= 0x74,
80 HOST_TRANSITION_DETECT
= 0x75,
81 HOST_GPIO_CTRL
= 0x76,
82 HOST_I2C_ADDR
= 0x78, /* 32 bit */
84 HOST_I2C_XFER_CNT
= 0x7e,
87 /* HOST_SLOT_STAT bits */
88 HOST_SSTAT_ATTN
= (1 << 31),
91 HOST_CTRL_M66EN
= (1 << 16), /* M66EN PCI bus signal */
92 HOST_CTRL_TRDY
= (1 << 17), /* latched PCI TRDY */
93 HOST_CTRL_STOP
= (1 << 18), /* latched PCI STOP */
94 HOST_CTRL_DEVSEL
= (1 << 19), /* latched PCI DEVSEL */
95 HOST_CTRL_REQ64
= (1 << 20), /* latched PCI REQ64 */
96 HOST_CTRL_GLOBAL_RST
= (1 << 31), /* global reset */
100 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
102 PORT_REGS_SIZE
= 0x2000,
104 PORT_LRAM
= 0x0000, /* 31 LRAM slots and PMP regs */
105 PORT_LRAM_SLOT_SZ
= 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
107 PORT_PMP
= 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
108 PORT_PMP_STATUS
= 0x0000, /* port device status offset */
109 PORT_PMP_QACTIVE
= 0x0004, /* port device QActive offset */
110 PORT_PMP_SIZE
= 0x0008, /* 8 bytes per PMP */
113 PORT_CTRL_STAT
= 0x1000, /* write: ctrl-set, read: stat */
114 PORT_CTRL_CLR
= 0x1004, /* write: ctrl-clear */
115 PORT_IRQ_STAT
= 0x1008, /* high: status, low: interrupt */
116 PORT_IRQ_ENABLE_SET
= 0x1010, /* write: enable-set */
117 PORT_IRQ_ENABLE_CLR
= 0x1014, /* write: enable-clear */
118 PORT_ACTIVATE_UPPER_ADDR
= 0x101c,
119 PORT_EXEC_FIFO
= 0x1020, /* command execution fifo */
120 PORT_CMD_ERR
= 0x1024, /* command error number */
121 PORT_FIS_CFG
= 0x1028,
122 PORT_FIFO_THRES
= 0x102c,
124 PORT_DECODE_ERR_CNT
= 0x1040,
125 PORT_DECODE_ERR_THRESH
= 0x1042,
126 PORT_CRC_ERR_CNT
= 0x1044,
127 PORT_CRC_ERR_THRESH
= 0x1046,
128 PORT_HSHK_ERR_CNT
= 0x1048,
129 PORT_HSHK_ERR_THRESH
= 0x104a,
131 PORT_PHY_CFG
= 0x1050,
132 PORT_SLOT_STAT
= 0x1800,
133 PORT_CMD_ACTIVATE
= 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
134 PORT_CONTEXT
= 0x1e04,
135 PORT_EXEC_DIAG
= 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
136 PORT_PSD_DIAG
= 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
137 PORT_SCONTROL
= 0x1f00,
138 PORT_SSTATUS
= 0x1f04,
139 PORT_SERROR
= 0x1f08,
140 PORT_SACTIVE
= 0x1f0c,
142 /* PORT_CTRL_STAT bits */
143 PORT_CS_PORT_RST
= (1 << 0), /* port reset */
144 PORT_CS_DEV_RST
= (1 << 1), /* device reset */
145 PORT_CS_INIT
= (1 << 2), /* port initialize */
146 PORT_CS_IRQ_WOC
= (1 << 3), /* interrupt write one to clear */
147 PORT_CS_CDB16
= (1 << 5), /* 0=12b cdb, 1=16b cdb */
148 PORT_CS_PMP_RESUME
= (1 << 6), /* PMP resume */
149 PORT_CS_32BIT_ACTV
= (1 << 10), /* 32-bit activation */
150 PORT_CS_PMP_EN
= (1 << 13), /* port multiplier enable */
151 PORT_CS_RDY
= (1 << 31), /* port ready to accept commands */
153 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
154 /* bits[11:0] are masked */
155 PORT_IRQ_COMPLETE
= (1 << 0), /* command(s) completed */
156 PORT_IRQ_ERROR
= (1 << 1), /* command execution error */
157 PORT_IRQ_PORTRDY_CHG
= (1 << 2), /* port ready change */
158 PORT_IRQ_PWR_CHG
= (1 << 3), /* power management change */
159 PORT_IRQ_PHYRDY_CHG
= (1 << 4), /* PHY ready change */
160 PORT_IRQ_COMWAKE
= (1 << 5), /* COMWAKE received */
161 PORT_IRQ_UNK_FIS
= (1 << 6), /* unknown FIS received */
162 PORT_IRQ_DEV_XCHG
= (1 << 7), /* device exchanged */
163 PORT_IRQ_8B10B
= (1 << 8), /* 8b/10b decode error threshold */
164 PORT_IRQ_CRC
= (1 << 9), /* CRC error threshold */
165 PORT_IRQ_HANDSHAKE
= (1 << 10), /* handshake error threshold */
166 PORT_IRQ_SDB_NOTIFY
= (1 << 11), /* SDB notify received */
168 DEF_PORT_IRQ
= PORT_IRQ_COMPLETE
| PORT_IRQ_ERROR
|
169 PORT_IRQ_PHYRDY_CHG
| PORT_IRQ_DEV_XCHG
|
170 PORT_IRQ_UNK_FIS
| PORT_IRQ_SDB_NOTIFY
,
172 /* bits[27:16] are unmasked (raw) */
173 PORT_IRQ_RAW_SHIFT
= 16,
174 PORT_IRQ_MASKED_MASK
= 0x7ff,
175 PORT_IRQ_RAW_MASK
= (0x7ff << PORT_IRQ_RAW_SHIFT
),
177 /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
178 PORT_IRQ_STEER_SHIFT
= 30,
179 PORT_IRQ_STEER_MASK
= (3 << PORT_IRQ_STEER_SHIFT
),
181 /* PORT_CMD_ERR constants */
182 PORT_CERR_DEV
= 1, /* Error bit in D2H Register FIS */
183 PORT_CERR_SDB
= 2, /* Error bit in SDB FIS */
184 PORT_CERR_DATA
= 3, /* Error in data FIS not detected by dev */
185 PORT_CERR_SEND
= 4, /* Initial cmd FIS transmission failure */
186 PORT_CERR_INCONSISTENT
= 5, /* Protocol mismatch */
187 PORT_CERR_DIRECTION
= 6, /* Data direction mismatch */
188 PORT_CERR_UNDERRUN
= 7, /* Ran out of SGEs while writing */
189 PORT_CERR_OVERRUN
= 8, /* Ran out of SGEs while reading */
190 PORT_CERR_PKT_PROT
= 11, /* DIR invalid in 1st PIO setup of ATAPI */
191 PORT_CERR_SGT_BOUNDARY
= 16, /* PLD ecode 00 - SGT not on qword boundary */
192 PORT_CERR_SGT_TGTABRT
= 17, /* PLD ecode 01 - target abort */
193 PORT_CERR_SGT_MSTABRT
= 18, /* PLD ecode 10 - master abort */
194 PORT_CERR_SGT_PCIPERR
= 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
195 PORT_CERR_CMD_BOUNDARY
= 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
196 PORT_CERR_CMD_TGTABRT
= 25, /* ctrl[15:13] 010 - target abort */
197 PORT_CERR_CMD_MSTABRT
= 26, /* ctrl[15:13] 100 - master abort */
198 PORT_CERR_CMD_PCIPERR
= 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
199 PORT_CERR_XFR_UNDEF
= 32, /* PSD ecode 00 - undefined */
200 PORT_CERR_XFR_TGTABRT
= 33, /* PSD ecode 01 - target abort */
201 PORT_CERR_XFR_MSTABRT
= 34, /* PSD ecode 10 - master abort */
202 PORT_CERR_XFR_PCIPERR
= 35, /* PSD ecode 11 - PCI prity err during transfer */
203 PORT_CERR_SENDSERVICE
= 36, /* FIS received while sending service */
205 /* bits of PRB control field */
206 PRB_CTRL_PROTOCOL
= (1 << 0), /* override def. ATA protocol */
207 PRB_CTRL_PACKET_READ
= (1 << 4), /* PACKET cmd read */
208 PRB_CTRL_PACKET_WRITE
= (1 << 5), /* PACKET cmd write */
209 PRB_CTRL_NIEN
= (1 << 6), /* Mask completion irq */
210 PRB_CTRL_SRST
= (1 << 7), /* Soft reset request (ign BSY?) */
212 /* PRB protocol field */
213 PRB_PROT_PACKET
= (1 << 0),
214 PRB_PROT_TCQ
= (1 << 1),
215 PRB_PROT_NCQ
= (1 << 2),
216 PRB_PROT_READ
= (1 << 3),
217 PRB_PROT_WRITE
= (1 << 4),
218 PRB_PROT_TRANSPARENT
= (1 << 5),
223 SGE_TRM
= (1 << 31), /* Last SGE in chain */
224 SGE_LNK
= (1 << 30), /* linked list
225 Points to SGT, not SGE */
226 SGE_DRD
= (1 << 29), /* discard data read (/dev/null)
227 data address ignored */
237 SIL24_COMMON_FLAGS
= ATA_FLAG_SATA
| ATA_FLAG_PIO_DMA
|
238 ATA_FLAG_NCQ
| ATA_FLAG_ACPI_SATA
|
239 ATA_FLAG_AN
| ATA_FLAG_PMP
,
240 SIL24_FLAG_PCIX_IRQ_WOC
= (1 << 24), /* IRQ loss errata on PCI-X */
242 IRQ_STAT_4PORTS
= 0xf,
245 struct sil24_ata_block
{
246 struct sil24_prb prb
;
247 struct sil24_sge sge
[SIL24_MAX_SGE
];
250 struct sil24_atapi_block
{
251 struct sil24_prb prb
;
253 struct sil24_sge sge
[SIL24_MAX_SGE
];
256 union sil24_cmd_block
{
257 struct sil24_ata_block ata
;
258 struct sil24_atapi_block atapi
;
261 static const struct sil24_cerr_info
{
262 unsigned int err_mask
, action
;
264 } sil24_cerr_db
[] = {
265 [0] = { AC_ERR_DEV
, 0,
267 [PORT_CERR_DEV
] = { AC_ERR_DEV
, 0,
268 "device error via D2H FIS" },
269 [PORT_CERR_SDB
] = { AC_ERR_DEV
, 0,
270 "device error via SDB FIS" },
271 [PORT_CERR_DATA
] = { AC_ERR_ATA_BUS
, ATA_EH_RESET
,
272 "error in data FIS" },
273 [PORT_CERR_SEND
] = { AC_ERR_ATA_BUS
, ATA_EH_RESET
,
274 "failed to transmit command FIS" },
275 [PORT_CERR_INCONSISTENT
] = { AC_ERR_HSM
, ATA_EH_RESET
,
276 "protocol mismatch" },
277 [PORT_CERR_DIRECTION
] = { AC_ERR_HSM
, ATA_EH_RESET
,
278 "data direction mismatch" },
279 [PORT_CERR_UNDERRUN
] = { AC_ERR_HSM
, ATA_EH_RESET
,
280 "ran out of SGEs while writing" },
281 [PORT_CERR_OVERRUN
] = { AC_ERR_HSM
, ATA_EH_RESET
,
282 "ran out of SGEs while reading" },
283 [PORT_CERR_PKT_PROT
] = { AC_ERR_HSM
, ATA_EH_RESET
,
284 "invalid data direction for ATAPI CDB" },
285 [PORT_CERR_SGT_BOUNDARY
] = { AC_ERR_SYSTEM
, ATA_EH_RESET
,
286 "SGT not on qword boundary" },
287 [PORT_CERR_SGT_TGTABRT
] = { AC_ERR_HOST_BUS
, ATA_EH_RESET
,
288 "PCI target abort while fetching SGT" },
289 [PORT_CERR_SGT_MSTABRT
] = { AC_ERR_HOST_BUS
, ATA_EH_RESET
,
290 "PCI master abort while fetching SGT" },
291 [PORT_CERR_SGT_PCIPERR
] = { AC_ERR_HOST_BUS
, ATA_EH_RESET
,
292 "PCI parity error while fetching SGT" },
293 [PORT_CERR_CMD_BOUNDARY
] = { AC_ERR_SYSTEM
, ATA_EH_RESET
,
294 "PRB not on qword boundary" },
295 [PORT_CERR_CMD_TGTABRT
] = { AC_ERR_HOST_BUS
, ATA_EH_RESET
,
296 "PCI target abort while fetching PRB" },
297 [PORT_CERR_CMD_MSTABRT
] = { AC_ERR_HOST_BUS
, ATA_EH_RESET
,
298 "PCI master abort while fetching PRB" },
299 [PORT_CERR_CMD_PCIPERR
] = { AC_ERR_HOST_BUS
, ATA_EH_RESET
,
300 "PCI parity error while fetching PRB" },
301 [PORT_CERR_XFR_UNDEF
] = { AC_ERR_HOST_BUS
, ATA_EH_RESET
,
302 "undefined error while transferring data" },
303 [PORT_CERR_XFR_TGTABRT
] = { AC_ERR_HOST_BUS
, ATA_EH_RESET
,
304 "PCI target abort while transferring data" },
305 [PORT_CERR_XFR_MSTABRT
] = { AC_ERR_HOST_BUS
, ATA_EH_RESET
,
306 "PCI master abort while transferring data" },
307 [PORT_CERR_XFR_PCIPERR
] = { AC_ERR_HOST_BUS
, ATA_EH_RESET
,
308 "PCI parity error while transferring data" },
309 [PORT_CERR_SENDSERVICE
] = { AC_ERR_HSM
, ATA_EH_RESET
,
310 "FIS received while sending service FIS" },
316 * The preview driver always returned 0 for status. We emulate it
317 * here from the previous interrupt.
319 struct sil24_port_priv
{
320 union sil24_cmd_block
*cmd_block
; /* 32 cmd blocks */
321 dma_addr_t cmd_block_dma
; /* DMA base addr for them */
325 static void sil24_dev_config(struct ata_device
*dev
);
326 static int sil24_scr_read(struct ata_link
*link
, unsigned sc_reg
, u32
*val
);
327 static int sil24_scr_write(struct ata_link
*link
, unsigned sc_reg
, u32 val
);
328 static int sil24_qc_defer(struct ata_queued_cmd
*qc
);
329 static enum ata_completion_errors
sil24_qc_prep(struct ata_queued_cmd
*qc
);
330 static unsigned int sil24_qc_issue(struct ata_queued_cmd
*qc
);
331 static bool sil24_qc_fill_rtf(struct ata_queued_cmd
*qc
);
332 static void sil24_pmp_attach(struct ata_port
*ap
);
333 static void sil24_pmp_detach(struct ata_port
*ap
);
334 static void sil24_freeze(struct ata_port
*ap
);
335 static void sil24_thaw(struct ata_port
*ap
);
336 static int sil24_softreset(struct ata_link
*link
, unsigned int *class,
337 unsigned long deadline
);
338 static int sil24_hardreset(struct ata_link
*link
, unsigned int *class,
339 unsigned long deadline
);
340 static int sil24_pmp_hardreset(struct ata_link
*link
, unsigned int *class,
341 unsigned long deadline
);
342 static void sil24_error_handler(struct ata_port
*ap
);
343 static void sil24_post_internal_cmd(struct ata_queued_cmd
*qc
);
344 static int sil24_port_start(struct ata_port
*ap
);
345 static int sil24_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
);
346 #ifdef CONFIG_PM_SLEEP
347 static int sil24_pci_device_resume(struct pci_dev
*pdev
);
350 static int sil24_port_resume(struct ata_port
*ap
);
353 static const struct pci_device_id sil24_pci_tbl
[] = {
354 { PCI_VDEVICE(CMD
, 0x3124), BID_SIL3124
},
355 { PCI_VDEVICE(INTEL
, 0x3124), BID_SIL3124
},
356 { PCI_VDEVICE(CMD
, 0x3132), BID_SIL3132
},
357 { PCI_VDEVICE(CMD
, 0x0242), BID_SIL3132
},
358 { PCI_VDEVICE(CMD
, 0x0244), BID_SIL3132
},
359 { PCI_VDEVICE(CMD
, 0x3131), BID_SIL3131
},
360 { PCI_VDEVICE(CMD
, 0x3531), BID_SIL3131
},
362 { } /* terminate list */
365 static struct pci_driver sil24_pci_driver
= {
367 .id_table
= sil24_pci_tbl
,
368 .probe
= sil24_init_one
,
369 .remove
= ata_pci_remove_one
,
370 #ifdef CONFIG_PM_SLEEP
371 .suspend
= ata_pci_device_suspend
,
372 .resume
= sil24_pci_device_resume
,
376 static struct scsi_host_template sil24_sht
= {
377 __ATA_BASE_SHT(DRV_NAME
),
378 .can_queue
= SIL24_MAX_CMDS
,
379 .sg_tablesize
= SIL24_MAX_SGE
,
380 .dma_boundary
= ATA_DMA_BOUNDARY
,
381 .tag_alloc_policy
= BLK_TAG_ALLOC_FIFO
,
382 .sdev_attrs
= ata_ncq_sdev_attrs
,
383 .change_queue_depth
= ata_scsi_change_queue_depth
,
384 .slave_configure
= ata_scsi_slave_config
387 static struct ata_port_operations sil24_ops
= {
388 .inherits
= &sata_pmp_port_ops
,
390 .qc_defer
= sil24_qc_defer
,
391 .qc_prep
= sil24_qc_prep
,
392 .qc_issue
= sil24_qc_issue
,
393 .qc_fill_rtf
= sil24_qc_fill_rtf
,
395 .freeze
= sil24_freeze
,
397 .softreset
= sil24_softreset
,
398 .hardreset
= sil24_hardreset
,
399 .pmp_softreset
= sil24_softreset
,
400 .pmp_hardreset
= sil24_pmp_hardreset
,
401 .error_handler
= sil24_error_handler
,
402 .post_internal_cmd
= sil24_post_internal_cmd
,
403 .dev_config
= sil24_dev_config
,
405 .scr_read
= sil24_scr_read
,
406 .scr_write
= sil24_scr_write
,
407 .pmp_attach
= sil24_pmp_attach
,
408 .pmp_detach
= sil24_pmp_detach
,
410 .port_start
= sil24_port_start
,
412 .port_resume
= sil24_port_resume
,
416 static bool sata_sil24_msi
; /* Disable MSI */
417 module_param_named(msi
, sata_sil24_msi
, bool, S_IRUGO
);
418 MODULE_PARM_DESC(msi
, "Enable MSI (Default: false)");
421 * Use bits 30-31 of port_flags to encode available port numbers.
422 * Current maxium is 4.
424 #define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
425 #define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
427 static const struct ata_port_info sil24_port_info
[] = {
430 .flags
= SIL24_COMMON_FLAGS
| SIL24_NPORTS2FLAG(4) |
431 SIL24_FLAG_PCIX_IRQ_WOC
,
432 .pio_mask
= ATA_PIO4
,
433 .mwdma_mask
= ATA_MWDMA2
,
434 .udma_mask
= ATA_UDMA5
,
435 .port_ops
= &sil24_ops
,
439 .flags
= SIL24_COMMON_FLAGS
| SIL24_NPORTS2FLAG(2),
440 .pio_mask
= ATA_PIO4
,
441 .mwdma_mask
= ATA_MWDMA2
,
442 .udma_mask
= ATA_UDMA5
,
443 .port_ops
= &sil24_ops
,
445 /* sil_3131/sil_3531 */
447 .flags
= SIL24_COMMON_FLAGS
| SIL24_NPORTS2FLAG(1),
448 .pio_mask
= ATA_PIO4
,
449 .mwdma_mask
= ATA_MWDMA2
,
450 .udma_mask
= ATA_UDMA5
,
451 .port_ops
= &sil24_ops
,
/* Map a libata tag to the hardware command slot.
 * NOTE(review): extraction fragment — the braces and the return
 * statements (orig lines 456, 458-460) are missing from this view;
 * only the internal-tag check survives.  Confirm the returned slot
 * against the complete source before relying on this. */
455 static int sil24_tag(int tag
)
457 if (unlikely(ata_tag_internal(tag
)))
462 static unsigned long sil24_port_offset(struct ata_port
*ap
)
464 return ap
->port_no
* PORT_REGS_SIZE
;
467 static void __iomem
*sil24_port_base(struct ata_port
*ap
)
469 return ap
->host
->iomap
[SIL24_PORT_BAR
] + sil24_port_offset(ap
);
472 static void sil24_dev_config(struct ata_device
*dev
)
474 void __iomem
*port
= sil24_port_base(dev
->link
->ap
);
476 if (dev
->cdb_len
== 16)
477 writel(PORT_CS_CDB16
, port
+ PORT_CTRL_STAT
);
479 writel(PORT_CS_CDB16
, port
+ PORT_CTRL_CLR
);
/* Read the received FIS for @tag out of the port's LRAM slot and
 * convert it into a taskfile.
 * NOTE(review): extraction fragment — the declaration of the local
 * `fis` buffer (orig line ~486) and the braces are missing from this
 * view; confirm its size against the complete source. */
482 static void sil24_read_tf(struct ata_port
*ap
, int tag
, struct ata_taskfile
*tf
)
484 void __iomem
*port
= sil24_port_base(ap
)
485 struct sil24_prb __iomem
*prb
;
/* each LRAM slot is PORT_LRAM_SLOT_SZ bytes; index by hardware tag */
488 prb
= port
+ PORT_LRAM
+ sil24_tag(tag
) * PORT_LRAM_SLOT_SZ
;
/* copy FIS bytes out of device LRAM, then decode into *tf */
489 memcpy_fromio(fis
, prb
->fis
, sizeof(fis
));
490 ata_tf_from_fis(fis
, tf
);
493 static int sil24_scr_map
[] = {
/* Read SATA SCR register @sc_reg into *@val.
 * Registers live at PORT_SCONTROL plus a per-register offset taken
 * from sil24_scr_map (4 bytes apart).
 * NOTE(review): extraction fragment — the success/failure return
 * statements (orig lines 506-509) are missing from this view. */
500 static int sil24_scr_read(struct ata_link
*link
, unsigned sc_reg
, u32
*val
)
502 void __iomem
*scr_addr
= sil24_port_base(link
->ap
) + PORT_SCONTROL
;
/* bounds-check the SCR index before indexing the map */
504 if (sc_reg
< ARRAY_SIZE(sil24_scr_map
)) {
505 *val
= readl(scr_addr
+ sil24_scr_map
[sc_reg
] * 4);
/* Write @val to SATA SCR register @sc_reg — mirror of sil24_scr_read.
 * NOTE(review): extraction fragment — the success/failure return
 * statements are missing from this view. */
511 static int sil24_scr_write(struct ata_link
*link
, unsigned sc_reg
, u32 val
)
513 void __iomem
*scr_addr
= sil24_port_base(link
->ap
) + PORT_SCONTROL
;
/* bounds-check the SCR index before indexing the map */
515 if (sc_reg
< ARRAY_SIZE(sil24_scr_map
)) {
516 writel(val
, scr_addr
+ sil24_scr_map
[sc_reg
] * 4);
522 static void sil24_config_port(struct ata_port
*ap
)
524 void __iomem
*port
= sil24_port_base(ap
);
526 /* configure IRQ WoC */
527 if (ap
->flags
& SIL24_FLAG_PCIX_IRQ_WOC
)
528 writel(PORT_CS_IRQ_WOC
, port
+ PORT_CTRL_STAT
);
530 writel(PORT_CS_IRQ_WOC
, port
+ PORT_CTRL_CLR
);
532 /* zero error counters. */
533 writew(0x8000, port
+ PORT_DECODE_ERR_THRESH
);
534 writew(0x8000, port
+ PORT_CRC_ERR_THRESH
);
535 writew(0x8000, port
+ PORT_HSHK_ERR_THRESH
);
536 writew(0x0000, port
+ PORT_DECODE_ERR_CNT
);
537 writew(0x0000, port
+ PORT_CRC_ERR_CNT
);
538 writew(0x0000, port
+ PORT_HSHK_ERR_CNT
);
540 /* always use 64bit activation */
541 writel(PORT_CS_32BIT_ACTV
, port
+ PORT_CTRL_CLR
);
543 /* clear port multiplier enable and resume bits */
544 writel(PORT_CS_PMP_EN
| PORT_CS_PMP_RESUME
, port
+ PORT_CTRL_CLR
);
547 static void sil24_config_pmp(struct ata_port
*ap
, int attached
)
549 void __iomem
*port
= sil24_port_base(ap
);
552 writel(PORT_CS_PMP_EN
, port
+ PORT_CTRL_STAT
);
554 writel(PORT_CS_PMP_EN
, port
+ PORT_CTRL_CLR
);
557 static void sil24_clear_pmp(struct ata_port
*ap
)
559 void __iomem
*port
= sil24_port_base(ap
);
562 writel(PORT_CS_PMP_RESUME
, port
+ PORT_CTRL_CLR
);
564 for (i
= 0; i
< SATA_PMP_MAX_PORTS
; i
++) {
565 void __iomem
*pmp_base
= port
+ PORT_PMP
+ i
* PORT_PMP_SIZE
;
567 writel(0, pmp_base
+ PORT_PMP_STATUS
);
568 writel(0, pmp_base
+ PORT_PMP_QACTIVE
);
572 static int sil24_init_port(struct ata_port
*ap
)
574 void __iomem
*port
= sil24_port_base(ap
);
575 struct sil24_port_priv
*pp
= ap
->private_data
;
578 /* clear PMP error status */
579 if (sata_pmp_attached(ap
))
582 writel(PORT_CS_INIT
, port
+ PORT_CTRL_STAT
);
583 ata_wait_register(ap
, port
+ PORT_CTRL_STAT
,
584 PORT_CS_INIT
, PORT_CS_INIT
, 10, 100);
585 tmp
= ata_wait_register(ap
, port
+ PORT_CTRL_STAT
,
586 PORT_CS_RDY
, 0, 10, 100);
588 if ((tmp
& (PORT_CS_INIT
| PORT_CS_RDY
)) != PORT_CS_RDY
) {
590 ap
->link
.eh_context
.i
.action
|= ATA_EH_RESET
;
597 static int sil24_exec_polled_cmd(struct ata_port
*ap
, int pmp
,
598 const struct ata_taskfile
*tf
,
599 int is_cmd
, u32 ctrl
,
600 unsigned long timeout_msec
)
602 void __iomem
*port
= sil24_port_base(ap
);
603 struct sil24_port_priv
*pp
= ap
->private_data
;
604 struct sil24_prb
*prb
= &pp
->cmd_block
[0].ata
.prb
;
605 dma_addr_t paddr
= pp
->cmd_block_dma
;
606 u32 irq_enabled
, irq_mask
, irq_stat
;
609 prb
->ctrl
= cpu_to_le16(ctrl
);
610 ata_tf_to_fis(tf
, pmp
, is_cmd
, prb
->fis
);
612 /* temporarily plug completion and error interrupts */
613 irq_enabled
= readl(port
+ PORT_IRQ_ENABLE_SET
);
614 writel(PORT_IRQ_COMPLETE
| PORT_IRQ_ERROR
, port
+ PORT_IRQ_ENABLE_CLR
);
617 * The barrier is required to ensure that writes to cmd_block reach
618 * the memory before the write to PORT_CMD_ACTIVATE.
621 writel((u32
)paddr
, port
+ PORT_CMD_ACTIVATE
);
622 writel((u64
)paddr
>> 32, port
+ PORT_CMD_ACTIVATE
+ 4);
624 irq_mask
= (PORT_IRQ_COMPLETE
| PORT_IRQ_ERROR
) << PORT_IRQ_RAW_SHIFT
;
625 irq_stat
= ata_wait_register(ap
, port
+ PORT_IRQ_STAT
, irq_mask
, 0x0,
628 writel(irq_mask
, port
+ PORT_IRQ_STAT
); /* clear IRQs */
629 irq_stat
>>= PORT_IRQ_RAW_SHIFT
;
631 if (irq_stat
& PORT_IRQ_COMPLETE
)
634 /* force port into known state */
637 if (irq_stat
& PORT_IRQ_ERROR
)
643 /* restore IRQ enabled */
644 writel(irq_enabled
, port
+ PORT_IRQ_ENABLE_SET
);
649 static int sil24_softreset(struct ata_link
*link
, unsigned int *class,
650 unsigned long deadline
)
652 struct ata_port
*ap
= link
->ap
;
653 int pmp
= sata_srst_pmp(link
);
654 unsigned long timeout_msec
= 0;
655 struct ata_taskfile tf
;
661 /* put the port into known state */
662 if (sil24_init_port(ap
)) {
663 reason
= "port not ready";
668 if (time_after(deadline
, jiffies
))
669 timeout_msec
= jiffies_to_msecs(deadline
- jiffies
);
671 ata_tf_init(link
->device
, &tf
); /* doesn't really matter */
672 rc
= sil24_exec_polled_cmd(ap
, pmp
, &tf
, 0, PRB_CTRL_SRST
,
678 reason
= "SRST command error";
682 sil24_read_tf(ap
, 0, &tf
);
683 *class = ata_dev_classify(&tf
);
685 DPRINTK("EXIT, class=%u\n", *class);
689 ata_link_err(link
, "softreset failed (%s)\n", reason
);
693 static int sil24_hardreset(struct ata_link
*link
, unsigned int *class,
694 unsigned long deadline
)
696 struct ata_port
*ap
= link
->ap
;
697 void __iomem
*port
= sil24_port_base(ap
);
698 struct sil24_port_priv
*pp
= ap
->private_data
;
699 int did_port_rst
= 0;
705 /* Sometimes, DEV_RST is not enough to recover the controller.
706 * This happens often after PM DMA CS errata.
708 if (pp
->do_port_rst
) {
710 "controller in dubious state, performing PORT_RST\n");
712 writel(PORT_CS_PORT_RST
, port
+ PORT_CTRL_STAT
);
714 writel(PORT_CS_PORT_RST
, port
+ PORT_CTRL_CLR
);
715 ata_wait_register(ap
, port
+ PORT_CTRL_STAT
, PORT_CS_RDY
, 0,
718 /* restore port configuration */
719 sil24_config_port(ap
);
720 sil24_config_pmp(ap
, ap
->nr_pmp_links
);
726 /* sil24 does the right thing(tm) without any protection */
730 if (ata_link_online(link
))
733 writel(PORT_CS_DEV_RST
, port
+ PORT_CTRL_STAT
);
734 tmp
= ata_wait_register(ap
, port
+ PORT_CTRL_STAT
,
735 PORT_CS_DEV_RST
, PORT_CS_DEV_RST
, 10,
738 /* SStatus oscillates between zero and valid status after
739 * DEV_RST, debounce it.
741 rc
= sata_link_debounce(link
, sata_deb_timing_long
, deadline
);
743 reason
= "PHY debouncing failed";
747 if (tmp
& PORT_CS_DEV_RST
) {
748 if (ata_link_offline(link
))
750 reason
= "link not ready";
754 /* Sil24 doesn't store signature FIS after hardreset, so we
755 * can't wait for BSY to clear. Some devices take a long time
756 * to get ready and those devices will choke if we don't wait
757 * for BSY clearance here. Tell libata to perform follow-up
768 ata_link_err(link
, "hardreset failed (%s)\n", reason
);
772 static inline void sil24_fill_sg(struct ata_queued_cmd
*qc
,
773 struct sil24_sge
*sge
)
775 struct scatterlist
*sg
;
776 struct sil24_sge
*last_sge
= NULL
;
779 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
780 sge
->addr
= cpu_to_le64(sg_dma_address(sg
));
781 sge
->cnt
= cpu_to_le32(sg_dma_len(sg
));
788 last_sge
->flags
= cpu_to_le32(SGE_TRM
);
791 static int sil24_qc_defer(struct ata_queued_cmd
*qc
)
793 struct ata_link
*link
= qc
->dev
->link
;
794 struct ata_port
*ap
= link
->ap
;
795 u8 prot
= qc
->tf
.protocol
;
798 * There is a bug in the chip:
799 * Port LRAM Causes the PRB/SGT Data to be Corrupted
800 * If the host issues a read request for LRAM and SActive registers
801 * while active commands are available in the port, PRB/SGT data in
802 * the LRAM can become corrupted. This issue applies only when
803 * reading from, but not writing to, the LRAM.
805 * Therefore, reading LRAM when there is no particular error [and
806 * other commands may be outstanding] is prohibited.
808 * To avoid this bug there are two situations where a command must run
809 * exclusive of any other commands on the port:
811 * - ATAPI commands which check the sense data
812 * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF
816 int is_excl
= (ata_is_atapi(prot
) ||
817 (qc
->flags
& ATA_QCFLAG_RESULT_TF
));
819 if (unlikely(ap
->excl_link
)) {
820 if (link
== ap
->excl_link
) {
821 if (ap
->nr_active_links
)
822 return ATA_DEFER_PORT
;
823 qc
->flags
|= ATA_QCFLAG_CLEAR_EXCL
;
825 return ATA_DEFER_PORT
;
826 } else if (unlikely(is_excl
)) {
827 ap
->excl_link
= link
;
828 if (ap
->nr_active_links
)
829 return ATA_DEFER_PORT
;
830 qc
->flags
|= ATA_QCFLAG_CLEAR_EXCL
;
833 return ata_std_qc_defer(qc
);
836 static enum ata_completion_errors
sil24_qc_prep(struct ata_queued_cmd
*qc
)
838 struct ata_port
*ap
= qc
->ap
;
839 struct sil24_port_priv
*pp
= ap
->private_data
;
840 union sil24_cmd_block
*cb
;
841 struct sil24_prb
*prb
;
842 struct sil24_sge
*sge
;
845 cb
= &pp
->cmd_block
[sil24_tag(qc
->hw_tag
)];
847 if (!ata_is_atapi(qc
->tf
.protocol
)) {
850 if (ata_is_data(qc
->tf
.protocol
)) {
852 ctrl
= PRB_CTRL_PROTOCOL
;
853 if (ata_is_ncq(qc
->tf
.protocol
))
854 prot
|= PRB_PROT_NCQ
;
855 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
)
856 prot
|= PRB_PROT_WRITE
;
858 prot
|= PRB_PROT_READ
;
859 prb
->prot
= cpu_to_le16(prot
);
862 prb
= &cb
->atapi
.prb
;
864 memset(cb
->atapi
.cdb
, 0, sizeof(cb
->atapi
.cdb
));
865 memcpy(cb
->atapi
.cdb
, qc
->cdb
, qc
->dev
->cdb_len
);
867 if (ata_is_data(qc
->tf
.protocol
)) {
868 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
)
869 ctrl
= PRB_CTRL_PACKET_WRITE
;
871 ctrl
= PRB_CTRL_PACKET_READ
;
875 prb
->ctrl
= cpu_to_le16(ctrl
);
876 ata_tf_to_fis(&qc
->tf
, qc
->dev
->link
->pmp
, 1, prb
->fis
);
878 if (qc
->flags
& ATA_QCFLAG_DMAMAP
)
879 sil24_fill_sg(qc
, sge
);
884 static unsigned int sil24_qc_issue(struct ata_queued_cmd
*qc
)
886 struct ata_port
*ap
= qc
->ap
;
887 struct sil24_port_priv
*pp
= ap
->private_data
;
888 void __iomem
*port
= sil24_port_base(ap
);
889 unsigned int tag
= sil24_tag(qc
->hw_tag
);
891 void __iomem
*activate
;
893 paddr
= pp
->cmd_block_dma
+ tag
* sizeof(*pp
->cmd_block
);
894 activate
= port
+ PORT_CMD_ACTIVATE
+ tag
* 8;
897 * The barrier is required to ensure that writes to cmd_block reach
898 * the memory before the write to PORT_CMD_ACTIVATE.
901 writel((u32
)paddr
, activate
);
902 writel((u64
)paddr
>> 32, activate
+ 4);
/* Fill qc->result_tf from the FIS stored in the command's LRAM slot.
 * NOTE(review): extraction fragment — the boolean return statement
 * (orig line ~910) is missing from this view. */
907 static bool sil24_qc_fill_rtf(struct ata_queued_cmd
*qc
)
909 sil24_read_tf(qc
->ap
, qc
->hw_tag
, &qc
->result_tf
);
/* PMP attach hook: enable PMP mode, then apply the sil24/Marvell-4140
 * quirk — NCQ is disabled when the attached PMP identifies as vendor
 * 0x11ab device 0x4140.
 * NOTE(review): extraction fragment — the logging call that emits the
 * quirk message (orig line ~922) and the closing braces are missing
 * from this view. */
913 static void sil24_pmp_attach(struct ata_port
*ap
)
915 u32
*gscr
= ap
->link
.device
->gscr
;
917 sil24_config_pmp(ap
, 1);
/* Marvell 4140 PMP: NCQ is known-broken behind it on sil24 */
920 if (sata_pmp_gscr_vendor(gscr
) == 0x11ab &&
921 sata_pmp_gscr_devid(gscr
) == 0x4140) {
923 "disabling NCQ support due to sil24-mv4140 quirk\n");
924 ap
->flags
&= ~ATA_FLAG_NCQ
;
/* PMP detach hook: disable PMP mode and restore the NCQ flag that
 * sil24_pmp_attach may have cleared for the mv4140 quirk.
 * NOTE(review): extraction fragment — at least one interior statement
 * (orig line ~930) is missing from this view. */
928 static void sil24_pmp_detach(struct ata_port
*ap
)
931 sil24_config_pmp(ap
, 0);
933 ap
->flags
|= ATA_FLAG_NCQ
;
936 static int sil24_pmp_hardreset(struct ata_link
*link
, unsigned int *class,
937 unsigned long deadline
)
941 rc
= sil24_init_port(link
->ap
);
943 ata_link_err(link
, "hardreset failed (port not ready)\n");
947 return sata_std_hardreset(link
, class, deadline
);
950 static void sil24_freeze(struct ata_port
*ap
)
952 void __iomem
*port
= sil24_port_base(ap
);
954 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
955 * PORT_IRQ_ENABLE instead.
957 writel(0xffff, port
+ PORT_IRQ_ENABLE_CLR
);
960 static void sil24_thaw(struct ata_port
*ap
)
962 void __iomem
*port
= sil24_port_base(ap
);
966 tmp
= readl(port
+ PORT_IRQ_STAT
);
967 writel(tmp
, port
+ PORT_IRQ_STAT
);
969 /* turn IRQ back on */
970 writel(DEF_PORT_IRQ
, port
+ PORT_IRQ_ENABLE_SET
);
973 static void sil24_error_intr(struct ata_port
*ap
)
975 void __iomem
*port
= sil24_port_base(ap
);
976 struct sil24_port_priv
*pp
= ap
->private_data
;
977 struct ata_queued_cmd
*qc
= NULL
;
978 struct ata_link
*link
;
979 struct ata_eh_info
*ehi
;
980 int abort
= 0, freeze
= 0;
983 /* on error, we need to clear IRQ explicitly */
984 irq_stat
= readl(port
+ PORT_IRQ_STAT
);
985 writel(irq_stat
, port
+ PORT_IRQ_STAT
);
987 /* first, analyze and record host port events */
989 ehi
= &link
->eh_info
;
990 ata_ehi_clear_desc(ehi
);
992 ata_ehi_push_desc(ehi
, "irq_stat 0x%08x", irq_stat
);
994 if (irq_stat
& PORT_IRQ_SDB_NOTIFY
) {
995 ata_ehi_push_desc(ehi
, "SDB notify");
996 sata_async_notification(ap
);
999 if (irq_stat
& (PORT_IRQ_PHYRDY_CHG
| PORT_IRQ_DEV_XCHG
)) {
1000 ata_ehi_hotplugged(ehi
);
1001 ata_ehi_push_desc(ehi
, "%s",
1002 irq_stat
& PORT_IRQ_PHYRDY_CHG
?
1003 "PHY RDY changed" : "device exchanged");
1007 if (irq_stat
& PORT_IRQ_UNK_FIS
) {
1008 ehi
->err_mask
|= AC_ERR_HSM
;
1009 ehi
->action
|= ATA_EH_RESET
;
1010 ata_ehi_push_desc(ehi
, "unknown FIS");
1014 /* deal with command error */
1015 if (irq_stat
& PORT_IRQ_ERROR
) {
1016 const struct sil24_cerr_info
*ci
= NULL
;
1017 unsigned int err_mask
= 0, action
= 0;
1023 /* DMA Context Switch Failure in Port Multiplier Mode
1024 * errata. If we have active commands to 3 or more
1025 * devices, any error condition on active devices can
1026 * corrupt DMA context switching.
1028 if (ap
->nr_active_links
>= 3) {
1029 ehi
->err_mask
|= AC_ERR_OTHER
;
1030 ehi
->action
|= ATA_EH_RESET
;
1031 ata_ehi_push_desc(ehi
, "PMP DMA CS errata");
1032 pp
->do_port_rst
= 1;
1036 /* find out the offending link and qc */
1037 if (sata_pmp_attached(ap
)) {
1038 context
= readl(port
+ PORT_CONTEXT
);
1039 pmp
= (context
>> 5) & 0xf;
1041 if (pmp
< ap
->nr_pmp_links
) {
1042 link
= &ap
->pmp_link
[pmp
];
1043 ehi
= &link
->eh_info
;
1044 qc
= ata_qc_from_tag(ap
, link
->active_tag
);
1046 ata_ehi_clear_desc(ehi
);
1047 ata_ehi_push_desc(ehi
, "irq_stat 0x%08x",
1050 err_mask
|= AC_ERR_HSM
;
1051 action
|= ATA_EH_RESET
;
1055 qc
= ata_qc_from_tag(ap
, link
->active_tag
);
1057 /* analyze CMD_ERR */
1058 cerr
= readl(port
+ PORT_CMD_ERR
);
1059 if (cerr
< ARRAY_SIZE(sil24_cerr_db
))
1060 ci
= &sil24_cerr_db
[cerr
];
1062 if (ci
&& ci
->desc
) {
1063 err_mask
|= ci
->err_mask
;
1064 action
|= ci
->action
;
1065 if (action
& ATA_EH_RESET
)
1067 ata_ehi_push_desc(ehi
, "%s", ci
->desc
);
1069 err_mask
|= AC_ERR_OTHER
;
1070 action
|= ATA_EH_RESET
;
1072 ata_ehi_push_desc(ehi
, "unknown command error %d",
1076 /* record error info */
1078 qc
->err_mask
|= err_mask
;
1080 ehi
->err_mask
|= err_mask
;
1082 ehi
->action
|= action
;
1084 /* if PMP, resume */
1085 if (sata_pmp_attached(ap
))
1086 writel(PORT_CS_PMP_RESUME
, port
+ PORT_CTRL_STAT
);
1089 /* freeze or abort */
1091 ata_port_freeze(ap
);
1094 ata_link_abort(qc
->dev
->link
);
1100 static inline void sil24_host_intr(struct ata_port
*ap
)
1102 void __iomem
*port
= sil24_port_base(ap
);
1103 u32 slot_stat
, qc_active
;
1106 /* If PCIX_IRQ_WOC, there's an inherent race window between
1107 * clearing IRQ pending status and reading PORT_SLOT_STAT
1108 * which may cause spurious interrupts afterwards. This is
1109 * unavoidable and much better than losing interrupts which
1110 * happens if IRQ pending is cleared after reading
1113 if (ap
->flags
& SIL24_FLAG_PCIX_IRQ_WOC
)
1114 writel(PORT_IRQ_COMPLETE
, port
+ PORT_IRQ_STAT
);
1116 slot_stat
= readl(port
+ PORT_SLOT_STAT
);
1118 if (unlikely(slot_stat
& HOST_SSTAT_ATTN
)) {
1119 sil24_error_intr(ap
);
1123 qc_active
= slot_stat
& ~HOST_SSTAT_ATTN
;
1124 rc
= ata_qc_complete_multiple(ap
, qc_active
);
1128 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
1129 ehi
->err_mask
|= AC_ERR_HSM
;
1130 ehi
->action
|= ATA_EH_RESET
;
1131 ata_port_freeze(ap
);
1135 /* spurious interrupts are expected if PCIX_IRQ_WOC */
1136 if (!(ap
->flags
& SIL24_FLAG_PCIX_IRQ_WOC
) && ata_ratelimit())
1138 "spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n",
1139 slot_stat
, ap
->link
.active_tag
, ap
->link
.sactive
);
1142 static irqreturn_t
sil24_interrupt(int irq
, void *dev_instance
)
1144 struct ata_host
*host
= dev_instance
;
1145 void __iomem
*host_base
= host
->iomap
[SIL24_HOST_BAR
];
1146 unsigned handled
= 0;
1150 status
= readl(host_base
+ HOST_IRQ_STAT
);
1152 if (status
== 0xffffffff) {
1153 dev_err(host
->dev
, "IRQ status == 0xffffffff, "
1154 "PCI fault or device removal?\n");
1158 if (!(status
& IRQ_STAT_4PORTS
))
1161 spin_lock(&host
->lock
);
1163 for (i
= 0; i
< host
->n_ports
; i
++)
1164 if (status
& (1 << i
)) {
1165 sil24_host_intr(host
->ports
[i
]);
1169 spin_unlock(&host
->lock
);
1171 return IRQ_RETVAL(handled
);
1174 static void sil24_error_handler(struct ata_port
*ap
)
1176 struct sil24_port_priv
*pp
= ap
->private_data
;
1178 if (sil24_init_port(ap
))
1179 ata_eh_freeze_port(ap
);
1181 sata_pmp_error_handler(ap
);
1183 pp
->do_port_rst
= 0;
1186 static void sil24_post_internal_cmd(struct ata_queued_cmd
*qc
)
1188 struct ata_port
*ap
= qc
->ap
;
1190 /* make DMA engine forget about the failed command */
1191 if ((qc
->flags
& ATA_QCFLAG_FAILED
) && sil24_init_port(ap
))
1192 ata_eh_freeze_port(ap
);
1195 static int sil24_port_start(struct ata_port
*ap
)
1197 struct device
*dev
= ap
->host
->dev
;
1198 struct sil24_port_priv
*pp
;
1199 union sil24_cmd_block
*cb
;
1200 size_t cb_size
= sizeof(*cb
) * SIL24_MAX_CMDS
;
1203 pp
= devm_kzalloc(dev
, sizeof(*pp
), GFP_KERNEL
);
1207 cb
= dmam_alloc_coherent(dev
, cb_size
, &cb_dma
, GFP_KERNEL
);
1212 pp
->cmd_block_dma
= cb_dma
;
1214 ap
->private_data
= pp
;
1216 ata_port_pbar_desc(ap
, SIL24_HOST_BAR
, -1, "host");
1217 ata_port_pbar_desc(ap
, SIL24_PORT_BAR
, sil24_port_offset(ap
), "port");
1222 static void sil24_init_controller(struct ata_host
*host
)
1224 void __iomem
*host_base
= host
->iomap
[SIL24_HOST_BAR
];
1229 writel(0, host_base
+ HOST_FLASH_CMD
);
1231 /* clear global reset & mask interrupts during initialization */
1232 writel(0, host_base
+ HOST_CTRL
);
1235 for (i
= 0; i
< host
->n_ports
; i
++) {
1236 struct ata_port
*ap
= host
->ports
[i
];
1237 void __iomem
*port
= sil24_port_base(ap
);
1240 /* Initial PHY setting */
1241 writel(0x20c, port
+ PORT_PHY_CFG
);
1243 /* Clear port RST */
1244 tmp
= readl(port
+ PORT_CTRL_STAT
);
1245 if (tmp
& PORT_CS_PORT_RST
) {
1246 writel(PORT_CS_PORT_RST
, port
+ PORT_CTRL_CLR
);
1247 tmp
= ata_wait_register(NULL
, port
+ PORT_CTRL_STAT
,
1249 PORT_CS_PORT_RST
, 10, 100);
1250 if (tmp
& PORT_CS_PORT_RST
)
1252 "failed to clear port RST\n");
1255 /* configure port */
1256 sil24_config_port(ap
);
1259 /* Turn on interrupts */
1260 writel(IRQ_STAT_4PORTS
, host_base
+ HOST_CTRL
);
1263 static int sil24_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1265 extern int __MARKER__sil24_cmd_block_is_sized_wrongly
;
1266 struct ata_port_info pi
= sil24_port_info
[ent
->driver_data
];
1267 const struct ata_port_info
*ppi
[] = { &pi
, NULL
};
1268 void __iomem
* const *iomap
;
1269 struct ata_host
*host
;
1273 /* cause link error if sil24_cmd_block is sized wrongly */
1274 if (sizeof(union sil24_cmd_block
) != PAGE_SIZE
)
1275 __MARKER__sil24_cmd_block_is_sized_wrongly
= 1;
1277 ata_print_version_once(&pdev
->dev
, DRV_VERSION
);
1279 /* acquire resources */
1280 rc
= pcim_enable_device(pdev
);
1284 rc
= pcim_iomap_regions(pdev
,
1285 (1 << SIL24_HOST_BAR
) | (1 << SIL24_PORT_BAR
),
1289 iomap
= pcim_iomap_table(pdev
);
1291 /* apply workaround for completion IRQ loss on PCI-X errata */
1292 if (pi
.flags
& SIL24_FLAG_PCIX_IRQ_WOC
) {
1293 tmp
= readl(iomap
[SIL24_HOST_BAR
] + HOST_CTRL
);
1294 if (tmp
& (HOST_CTRL_TRDY
| HOST_CTRL_STOP
| HOST_CTRL_DEVSEL
))
1295 dev_info(&pdev
->dev
,
1296 "Applying completion IRQ loss on PCI-X errata fix\n");
1298 pi
.flags
&= ~SIL24_FLAG_PCIX_IRQ_WOC
;
1301 /* allocate and fill host */
1302 host
= ata_host_alloc_pinfo(&pdev
->dev
, ppi
,
1303 SIL24_FLAG2NPORTS(ppi
[0]->flags
));
1306 host
->iomap
= iomap
;
1308 /* configure and activate the device */
1309 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
1311 dev_err(&pdev
->dev
, "DMA enable failed\n");
1315 /* Set max read request size to 4096. This slightly increases
1316 * write throughput for pci-e variants.
1318 pcie_set_readrq(pdev
, 4096);
1320 sil24_init_controller(host
);
1322 if (sata_sil24_msi
&& !pci_enable_msi(pdev
)) {
1323 dev_info(&pdev
->dev
, "Using MSI\n");
1327 pci_set_master(pdev
);
1328 return ata_host_activate(host
, pdev
->irq
, sil24_interrupt
, IRQF_SHARED
,
#ifdef CONFIG_PM_SLEEP
/*
 * PCI resume hook.  After the generic PCI resume, issue a global reset
 * if we are coming back from full suspend (device lost power), then
 * re-run controller initialization and resume the libata host.
 */
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
		writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);

	sil24_init_controller(host);

	ata_host_resume(host);

	return 0;
}
#endif
1355 static int sil24_port_resume(struct ata_port
*ap
)
1357 sil24_config_pmp(ap
, ap
->nr_pmp_links
);
1362 module_pci_driver(sil24_pci_driver
);
1364 MODULE_AUTHOR("Tejun Heo");
1365 MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
1366 MODULE_LICENSE("GPL");
1367 MODULE_DEVICE_TABLE(pci
, sil24_pci_tbl
);