1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "3.0"
51
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE 0x000f0000
54
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT 0x0000000f
57 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
58 #define EM_MSG_LED_VALUE 0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
60 #define EM_MSG_LED_VALUE_OFF 0xfff80000
61 #define EM_MSG_LED_VALUE_ON 0x00010000
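
/*
 * Example decode of an EM LED message value, using only the masks above.
 * A state of 0x00070102 breaks down as:
 *   bits 0-3   (EM_MSG_LED_HBA_PORT) = 0x2    -> HBA port 2
 *   bits 8-15  (EM_MSG_LED_PMP_SLOT) = 0x01   -> PMP slot 1
 *   bits 16-31 (EM_MSG_LED_VALUE)    = 0x0007 -> activity LED bits
 *                                               (EM_MSG_LED_VALUE_ACTIVITY)
 */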
62
63 static int ahci_skip_host_reset;
64 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
65 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
66
67 static int ahci_enable_alpm(struct ata_port *ap,
68 enum link_pm policy);
69 static void ahci_disable_alpm(struct ata_port *ap);
70 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
71 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
72 size_t size);
73 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
74 ssize_t size);
75 #define MAX_SLOTS 8
76
77 enum {
78 AHCI_PCI_BAR = 5,
79 AHCI_MAX_PORTS = 32,
80 AHCI_MAX_SG = 168, /* hardware max is 64K */
81 AHCI_DMA_BOUNDARY = 0xffffffff,
82 AHCI_MAX_CMDS = 32,
83 AHCI_CMD_SZ = 32,
84 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
85 AHCI_RX_FIS_SZ = 256,
86 AHCI_CMD_TBL_CDB = 0x40,
87 AHCI_CMD_TBL_HDR_SZ = 0x80,
88 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
89 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
90 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
91 AHCI_RX_FIS_SZ,
92 AHCI_IRQ_ON_SG = (1 << 31),
93 AHCI_CMD_ATAPI = (1 << 5),
94 AHCI_CMD_WRITE = (1 << 6),
95 AHCI_CMD_PREFETCH = (1 << 7),
96 AHCI_CMD_RESET = (1 << 8),
97 AHCI_CMD_CLR_BUSY = (1 << 10),
98
99 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
100 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
101 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
102
103 board_ahci = 0,
104 board_ahci_vt8251 = 1,
105 board_ahci_ign_iferr = 2,
106 board_ahci_sb600 = 3,
107 board_ahci_mv = 4,
108 board_ahci_sb700 = 5,
109 board_ahci_mcp65 = 6,
110 board_ahci_nopmp = 7,
111
112 /* global controller registers */
113 HOST_CAP = 0x00, /* host capabilities */
114 HOST_CTL = 0x04, /* global host control */
115 HOST_IRQ_STAT = 0x08, /* interrupt status */
116 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
117 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
118 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
119 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
120
121 /* HOST_CTL bits */
122 HOST_RESET = (1 << 0), /* reset controller; self-clear */
123 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
124 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
125
126 /* HOST_CAP bits */
127 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
128 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
129 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
130 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
131 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
132 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
133 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
134 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
135 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
136
137 /* registers for each SATA port */
138 PORT_LST_ADDR = 0x00, /* command list DMA addr */
139 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
140 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
141 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
142 PORT_IRQ_STAT = 0x10, /* interrupt status */
143 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
144 PORT_CMD = 0x18, /* port command */
145 PORT_TFDATA = 0x20, /* taskfile data */
146 PORT_SIG = 0x24, /* device TF signature */
147 PORT_CMD_ISSUE = 0x38, /* command issue */
148 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
149 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
150 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
151 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
152 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
153
154 /* PORT_IRQ_{STAT,MASK} bits */
155 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
156 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
157 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
158 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
159 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
160 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
161 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
162 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
163
164 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
165 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
166 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
167 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
168 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
169 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
170 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
171 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
172 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
173
174 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
175 PORT_IRQ_IF_ERR |
176 PORT_IRQ_CONNECT |
177 PORT_IRQ_PHYRDY |
178 PORT_IRQ_UNK_FIS |
179 PORT_IRQ_BAD_PMP,
180 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
181 PORT_IRQ_TF_ERR |
182 PORT_IRQ_HBUS_DATA_ERR,
183 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
184 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
185 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
186
187 /* PORT_CMD bits */
188 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
189 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
190 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
191 PORT_CMD_PMP = (1 << 17), /* PMP attached */
192 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
193 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
194 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
195 PORT_CMD_CLO = (1 << 3), /* Command list override */
196 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
197 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
198 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
199
200 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
201 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
202 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
203 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
204
205 /* hpriv->flags bits */
206 AHCI_HFLAG_NO_NCQ = (1 << 0),
207 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
208 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
209 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
210 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
211 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
212 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
213 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
214 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
215 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
216
217 /* ap->flags bits */
218
219 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
220 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
221 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
222 ATA_FLAG_IPM,
223
224 ICH_MAP = 0x90, /* ICH MAP register */
225
226 /* em_ctl bits */
227 EM_CTL_RST = (1 << 9), /* Reset */
228 EM_CTL_TM = (1 << 8), /* Transmit Message */
229 EM_CTL_ALHD = (1 << 26), /* Activity LED */
230 };
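
/*
 * Worked example of the per-port DMA area sized by AHCI_PORT_PRIV_DMA_SZ,
 * derived purely from the constants above:
 *
 *   AHCI_CMD_SLOT_SZ   = 32 cmds * 32 bytes             =  1024 bytes
 *   AHCI_CMD_TBL_SZ    = 0x80 hdr + 168 PRDs * 16 bytes =  2816 bytes
 *   AHCI_CMD_TBL_AR_SZ = 2816 * 32 cmds                 = 90112 bytes
 *   AHCI_RX_FIS_SZ                                      =   256 bytes
 *   -----------------------------------------------------------------
 *   AHCI_PORT_PRIV_DMA_SZ                               = 91392 bytes
 *
 * ahci_port_start() allocates one buffer of this size per port and carves
 * it into the command list, the received-FIS area and the per-tag command
 * tables.
 */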
231
232 struct ahci_cmd_hdr {
233 __le32 opts;
234 __le32 status;
235 __le32 tbl_addr;
236 __le32 tbl_addr_hi;
237 __le32 reserved[4];
238 };
239
240 struct ahci_sg {
241 __le32 addr;
242 __le32 addr_hi;
243 __le32 reserved;
244 __le32 flags_size;
245 };
246
247 struct ahci_em_priv {
248 enum sw_activity blink_policy;
249 struct timer_list timer;
250 unsigned long saved_activity;
251 unsigned long activity;
252 unsigned long led_state;
253 };
254
255 struct ahci_host_priv {
256 unsigned int flags; /* AHCI_HFLAG_* */
257 u32 cap; /* cap to use */
258 u32 port_map; /* port map to use */
259 u32 saved_cap; /* saved initial cap */
260 u32 saved_port_map; /* saved initial port_map */
261 u32 em_loc; /* enclosure management location */
262 };
263
264 struct ahci_port_priv {
265 struct ata_link *active_link;
266 struct ahci_cmd_hdr *cmd_slot;
267 dma_addr_t cmd_slot_dma;
268 void *cmd_tbl;
269 dma_addr_t cmd_tbl_dma;
270 void *rx_fis;
271 dma_addr_t rx_fis_dma;
272 /* for NCQ spurious interrupt analysis */
273 unsigned int ncq_saw_d2h:1;
274 unsigned int ncq_saw_dmas:1;
275 unsigned int ncq_saw_sdb:1;
276 u32 intr_mask; /* interrupts to enable */
277 struct ahci_em_priv em_priv[MAX_SLOTS];/* enclosure management info
278 * per PM slot */
279 };
280
281 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
282 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
283 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
284 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
285 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
286 static int ahci_port_start(struct ata_port *ap);
287 static void ahci_port_stop(struct ata_port *ap);
288 static void ahci_qc_prep(struct ata_queued_cmd *qc);
289 static void ahci_freeze(struct ata_port *ap);
290 static void ahci_thaw(struct ata_port *ap);
291 static void ahci_pmp_attach(struct ata_port *ap);
292 static void ahci_pmp_detach(struct ata_port *ap);
293 static int ahci_softreset(struct ata_link *link, unsigned int *class,
294 unsigned long deadline);
295 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
296 unsigned long deadline);
297 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
298 unsigned long deadline);
299 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
300 unsigned long deadline);
301 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
302 unsigned long deadline);
303 static void ahci_postreset(struct ata_link *link, unsigned int *class);
304 static void ahci_error_handler(struct ata_port *ap);
305 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
306 static int ahci_port_resume(struct ata_port *ap);
307 static void ahci_dev_config(struct ata_device *dev);
308 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
309 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
310 u32 opts);
311 #ifdef CONFIG_PM
312 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
313 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
314 static int ahci_pci_device_resume(struct pci_dev *pdev);
315 #endif
316 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
317 static ssize_t ahci_activity_store(struct ata_device *dev,
318 enum sw_activity val);
319 static void ahci_init_sw_activity(struct ata_link *link);
320
321 static struct device_attribute *ahci_shost_attrs[] = {
322 &dev_attr_link_power_management_policy,
323 &dev_attr_em_message_type,
324 &dev_attr_em_message,
325 NULL
326 };
327
328 static struct device_attribute *ahci_sdev_attrs[] = {
329 &dev_attr_sw_activity,
330 &dev_attr_unload_heads,
331 NULL
332 };
333
334 static struct scsi_host_template ahci_sht = {
335 ATA_NCQ_SHT(DRV_NAME),
336 .can_queue = AHCI_MAX_CMDS - 1,
337 .sg_tablesize = AHCI_MAX_SG,
338 .dma_boundary = AHCI_DMA_BOUNDARY,
339 .shost_attrs = ahci_shost_attrs,
340 .sdev_attrs = ahci_sdev_attrs,
341 };
342
343 static struct ata_port_operations ahci_ops = {
344 .inherits = &sata_pmp_port_ops,
345
346 .qc_defer = sata_pmp_qc_defer_cmd_switch,
347 .qc_prep = ahci_qc_prep,
348 .qc_issue = ahci_qc_issue,
349 .qc_fill_rtf = ahci_qc_fill_rtf,
350
351 .freeze = ahci_freeze,
352 .thaw = ahci_thaw,
353 .softreset = ahci_softreset,
354 .hardreset = ahci_hardreset,
355 .postreset = ahci_postreset,
356 .pmp_softreset = ahci_softreset,
357 .error_handler = ahci_error_handler,
358 .post_internal_cmd = ahci_post_internal_cmd,
359 .dev_config = ahci_dev_config,
360
361 .scr_read = ahci_scr_read,
362 .scr_write = ahci_scr_write,
363 .pmp_attach = ahci_pmp_attach,
364 .pmp_detach = ahci_pmp_detach,
365
366 .enable_pm = ahci_enable_alpm,
367 .disable_pm = ahci_disable_alpm,
368 .em_show = ahci_led_show,
369 .em_store = ahci_led_store,
370 .sw_activity_show = ahci_activity_show,
371 .sw_activity_store = ahci_activity_store,
372 #ifdef CONFIG_PM
373 .port_suspend = ahci_port_suspend,
374 .port_resume = ahci_port_resume,
375 #endif
376 .port_start = ahci_port_start,
377 .port_stop = ahci_port_stop,
378 };
379
380 static struct ata_port_operations ahci_vt8251_ops = {
381 .inherits = &ahci_ops,
382 .hardreset = ahci_vt8251_hardreset,
383 };
384
385 static struct ata_port_operations ahci_p5wdh_ops = {
386 .inherits = &ahci_ops,
387 .hardreset = ahci_p5wdh_hardreset,
388 };
389
390 static struct ata_port_operations ahci_sb600_ops = {
391 .inherits = &ahci_ops,
392 .softreset = ahci_sb600_softreset,
393 .pmp_softreset = ahci_sb600_softreset,
394 };
395
396 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
397
398 static const struct ata_port_info ahci_port_info[] = {
399 /* board_ahci */
400 {
401 .flags = AHCI_FLAG_COMMON,
402 .pio_mask = 0x1f, /* pio0-4 */
403 .udma_mask = ATA_UDMA6,
404 .port_ops = &ahci_ops,
405 },
406 /* board_ahci_vt8251 */
407 {
408 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
409 .flags = AHCI_FLAG_COMMON,
410 .pio_mask = 0x1f, /* pio0-4 */
411 .udma_mask = ATA_UDMA6,
412 .port_ops = &ahci_vt8251_ops,
413 },
414 /* board_ahci_ign_iferr */
415 {
416 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
417 .flags = AHCI_FLAG_COMMON,
418 .pio_mask = 0x1f, /* pio0-4 */
419 .udma_mask = ATA_UDMA6,
420 .port_ops = &ahci_ops,
421 },
422 /* board_ahci_sb600 */
423 {
424 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
425 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
426 AHCI_HFLAG_SECT255),
427 .flags = AHCI_FLAG_COMMON,
428 .pio_mask = 0x1f, /* pio0-4 */
429 .udma_mask = ATA_UDMA6,
430 .port_ops = &ahci_sb600_ops,
431 },
432 /* board_ahci_mv */
433 {
434 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
435 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
436 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
437 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
438 .pio_mask = 0x1f, /* pio0-4 */
439 .udma_mask = ATA_UDMA6,
440 .port_ops = &ahci_ops,
441 },
442 /* board_ahci_sb700 */
443 {
444 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
445 .flags = AHCI_FLAG_COMMON,
446 .pio_mask = 0x1f, /* pio0-4 */
447 .udma_mask = ATA_UDMA6,
448 .port_ops = &ahci_sb600_ops,
449 },
450 /* board_ahci_mcp65 */
451 {
452 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
453 .flags = AHCI_FLAG_COMMON,
454 .pio_mask = 0x1f, /* pio0-4 */
455 .udma_mask = ATA_UDMA6,
456 .port_ops = &ahci_ops,
457 },
458 /* board_ahci_nopmp */
459 {
460 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
461 .flags = AHCI_FLAG_COMMON,
462 .pio_mask = 0x1f, /* pio0-4 */
463 .udma_mask = ATA_UDMA6,
464 .port_ops = &ahci_ops,
465 },
466 };
467
468 static const struct pci_device_id ahci_pci_tbl[] = {
469 /* Intel */
470 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
471 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
472 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
473 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
474 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
475 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
476 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
477 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
478 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
479 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
480 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
481 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
482 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
483 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
484 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
485 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
486 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
487 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
488 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
489 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
490 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
491 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
492 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
493 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
494 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
495 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
496 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
497 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
498 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
499 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
500 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
501 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
502 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
503 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
504 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
505
506 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
507 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
508 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
509
510 /* ATI */
511 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
512 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
513 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
514 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
515 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
516 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
517 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
518
519 /* VIA */
520 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
521 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
522
523 /* NVIDIA */
524 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
525 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
526 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
527 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
528 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
529 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
530 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
531 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
532 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
533 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
534 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
535 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
536 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
537 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
538 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
539 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
540 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
541 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
542 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
543 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
544 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
545 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
546 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
547 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
548 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
549 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
550 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
551 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
552 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
553 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
554 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
555 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
556 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
557 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
558 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
559 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
560 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
561 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
562 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
563 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
564 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
565 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
566 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
567 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
568 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
569 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
570 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
571 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
572 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
573 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
574 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
575 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
576 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
577 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
578 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
579 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
580 { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
581 { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
582 { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
583 { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
584 { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
585 { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
586 { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
587 { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
588 { PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci }, /* MCP7B */
589 { PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci }, /* MCP7B */
590 { PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci }, /* MCP7B */
591 { PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */
592
593 /* SiS */
594 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
595 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
596 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
597
598 /* Marvell */
599 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
600 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
601
602 /* Promise */
603 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
604
605 /* Generic, PCI class code for AHCI */
606 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
607 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
608
609 { } /* terminate list */
610 };
611
612
613 static struct pci_driver ahci_pci_driver = {
614 .name = DRV_NAME,
615 .id_table = ahci_pci_tbl,
616 .probe = ahci_init_one,
617 .remove = ata_pci_remove_one,
618 #ifdef CONFIG_PM
619 .suspend = ahci_pci_device_suspend,
620 .resume = ahci_pci_device_resume,
621 #endif
622 };
623
624 static int ahci_em_messages = 1;
625 module_param(ahci_em_messages, int, 0444);
626 /* add other LED protocol types when they become supported */
627 MODULE_PARM_DESC(ahci_em_messages,
628 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
629
630 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
631 static int marvell_enable;
632 #else
633 static int marvell_enable = 1;
634 #endif
635 module_param(marvell_enable, int, 0644);
636 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
637
638
639 static inline int ahci_nr_ports(u32 cap)
640 {
641 return (cap & 0x1f) + 1;
642 }
643
644 static inline void __iomem *__ahci_port_base(struct ata_host *host,
645 unsigned int port_no)
646 {
647 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
648
649 return mmio + 0x100 + (port_no * 0x80);
650 }
651
652 static inline void __iomem *ahci_port_base(struct ata_port *ap)
653 {
654 return __ahci_port_base(ap->host, ap->port_no);
655 }
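
/*
 * Resulting MMIO layout (ABAR = PCI BAR 5 of the controller):
 *
 *   generic host control : ABAR + 0x000 .. 0x0ff  (HOST_CAP, HOST_CTL, ...)
 *   port 0 registers     : ABAR + 0x100 .. 0x17f
 *   port 1 registers     : ABAR + 0x180 .. 0x1ff
 *   port n registers     : ABAR + 0x100 + n * 0x80
 *
 * ahci_nr_ports() decodes CAP.NP, which is zero-based: a CAP value whose
 * low five bits are 0x03 means the silicon supports four ports.
 */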
656
657 static void ahci_enable_ahci(void __iomem *mmio)
658 {
659 int i;
660 u32 tmp;
661
662 /* turn on AHCI_EN */
663 tmp = readl(mmio + HOST_CTL);
664 if (tmp & HOST_AHCI_EN)
665 return;
666
667 /* Some controllers need AHCI_EN to be written multiple times.
668 * Try a few times before giving up.
669 */
670 for (i = 0; i < 5; i++) {
671 tmp |= HOST_AHCI_EN;
672 writel(tmp, mmio + HOST_CTL);
673 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
674 if (tmp & HOST_AHCI_EN)
675 return;
676 msleep(10);
677 }
678
679 WARN_ON(1);
680 }
681
682 /**
683 * ahci_save_initial_config - Save and fixup initial config values
684 * @pdev: target PCI device
685 * @hpriv: host private area to store config values
686 *
687 * Some registers containing configuration info might be set up by
688 * the BIOS and might be cleared on reset. This function saves the
689 * initial values of those registers into @hpriv such that they
690 * can be restored after controller reset.
691 *
692 * If inconsistent, config values are fixed up by this function.
693 *
694 * LOCKING:
695 * None.
696 */
697 static void ahci_save_initial_config(struct pci_dev *pdev,
698 struct ahci_host_priv *hpriv)
699 {
700 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
701 u32 cap, port_map;
702 int i;
703 int mv;
704
705 /* make sure AHCI mode is enabled before accessing CAP */
706 ahci_enable_ahci(mmio);
707
708 /* Values prefixed with saved_ are written back to host after
709 * reset. Values without are used for driver operation.
710 */
711 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
712 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
713
714 /* some chips have errata preventing 64bit use */
715 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
716 dev_printk(KERN_INFO, &pdev->dev,
717 "controller can't do 64bit DMA, forcing 32bit\n");
718 cap &= ~HOST_CAP_64;
719 }
720
721 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
722 dev_printk(KERN_INFO, &pdev->dev,
723 "controller can't do NCQ, turning off CAP_NCQ\n");
724 cap &= ~HOST_CAP_NCQ;
725 }
726
727 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
728 dev_printk(KERN_INFO, &pdev->dev,
729 "controller can do NCQ, turning on CAP_NCQ\n");
730 cap |= HOST_CAP_NCQ;
731 }
732
733 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
734 dev_printk(KERN_INFO, &pdev->dev,
735 "controller can't do PMP, turning off CAP_PMP\n");
736 cap &= ~HOST_CAP_PMP;
737 }
738
739 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
740 port_map != 1) {
741 dev_printk(KERN_INFO, &pdev->dev,
742 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
743 port_map, 1);
744 port_map = 1;
745 }
746
747 /*
748 * Temporary Marvell 6145 hack: PATA port presence
749 * is asserted through the standard AHCI port
750 * presence register, as bit 4 (counting from 0)
751 */
752 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
753 if (pdev->device == 0x6121)
754 mv = 0x3;
755 else
756 mv = 0xf;
757 dev_printk(KERN_ERR, &pdev->dev,
758 "MV_AHCI HACK: port_map %x -> %x\n",
759 port_map,
760 port_map & mv);
761 dev_printk(KERN_ERR, &pdev->dev,
762 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
763
764 port_map &= mv;
765 }
766
767 /* cross check port_map and cap.n_ports */
768 if (port_map) {
769 int map_ports = 0;
770
771 for (i = 0; i < AHCI_MAX_PORTS; i++)
772 if (port_map & (1 << i))
773 map_ports++;
774
775 /* If PI has more ports than n_ports, whine, clear
776 * port_map and let it be generated from n_ports.
777 */
778 if (map_ports > ahci_nr_ports(cap)) {
779 dev_printk(KERN_WARNING, &pdev->dev,
780 "implemented port map (0x%x) contains more "
781 "ports than nr_ports (%u), using nr_ports\n",
782 port_map, ahci_nr_ports(cap));
783 port_map = 0;
784 }
785 }
786
787 /* fabricate port_map from cap.nr_ports */
788 if (!port_map) {
789 port_map = (1 << ahci_nr_ports(cap)) - 1;
790 dev_printk(KERN_WARNING, &pdev->dev,
791 "forcing PORTS_IMPL to 0x%x\n", port_map);
792
793 /* write the fixed up value to the PI register */
794 hpriv->saved_port_map = port_map;
795 }
796
797 /* record values to use during operation */
798 hpriv->cap = cap;
799 hpriv->port_map = port_map;
800 }
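
/*
 * Example of the cross-check above: a controller reporting CAP.NP = 5
 * (six ports) but a PORTS_IMPL value with eight bits set is treated as
 * inconsistent.  port_map is cleared and then fabricated as
 * (1 << 6) - 1 = 0x3f, i.e. ports 0-5 implemented, and the fabricated
 * value is also recorded in saved_port_map so it is written back to the
 * PI register after a controller reset.
 */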
801
802 /**
803 * ahci_restore_initial_config - Restore initial config
804 * @host: target ATA host
805 *
806 * Restore initial config stored by ahci_save_initial_config().
807 *
808 * LOCKING:
809 * None.
810 */
811 static void ahci_restore_initial_config(struct ata_host *host)
812 {
813 struct ahci_host_priv *hpriv = host->private_data;
814 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
815
816 writel(hpriv->saved_cap, mmio + HOST_CAP);
817 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
818 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
819 }
820
821 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
822 {
823 static const int offset[] = {
824 [SCR_STATUS] = PORT_SCR_STAT,
825 [SCR_CONTROL] = PORT_SCR_CTL,
826 [SCR_ERROR] = PORT_SCR_ERR,
827 [SCR_ACTIVE] = PORT_SCR_ACT,
828 [SCR_NOTIFICATION] = PORT_SCR_NTF,
829 };
830 struct ahci_host_priv *hpriv = ap->host->private_data;
831
832 if (sc_reg < ARRAY_SIZE(offset) &&
833 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
834 return offset[sc_reg];
835 return 0;
836 }
837
838 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
839 {
840 void __iomem *port_mmio = ahci_port_base(link->ap);
841 int offset = ahci_scr_offset(link->ap, sc_reg);
842
843 if (offset) {
844 *val = readl(port_mmio + offset);
845 return 0;
846 }
847 return -EINVAL;
848 }
849
850 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
851 {
852 void __iomem *port_mmio = ahci_port_base(link->ap);
853 int offset = ahci_scr_offset(link->ap, sc_reg);
854
855 if (offset) {
856 writel(val, port_mmio + offset);
857 return 0;
858 }
859 return -EINVAL;
860 }
861
862 static void ahci_start_engine(struct ata_port *ap)
863 {
864 void __iomem *port_mmio = ahci_port_base(ap);
865 u32 tmp;
866
867 /* start DMA */
868 tmp = readl(port_mmio + PORT_CMD);
869 tmp |= PORT_CMD_START;
870 writel(tmp, port_mmio + PORT_CMD);
871 readl(port_mmio + PORT_CMD); /* flush */
872 }
873
874 static int ahci_stop_engine(struct ata_port *ap)
875 {
876 void __iomem *port_mmio = ahci_port_base(ap);
877 u32 tmp;
878
879 tmp = readl(port_mmio + PORT_CMD);
880
881 /* check if the HBA is idle */
882 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
883 return 0;
884
885 /* setting HBA to idle */
886 tmp &= ~PORT_CMD_START;
887 writel(tmp, port_mmio + PORT_CMD);
888
889 /* wait for engine to stop. This could be as long as 500 msec */
890 tmp = ata_wait_register(port_mmio + PORT_CMD,
891 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
892 if (tmp & PORT_CMD_LIST_ON)
893 return -EIO;
894
895 return 0;
896 }
897
898 static void ahci_start_fis_rx(struct ata_port *ap)
899 {
900 void __iomem *port_mmio = ahci_port_base(ap);
901 struct ahci_host_priv *hpriv = ap->host->private_data;
902 struct ahci_port_priv *pp = ap->private_data;
903 u32 tmp;
904
905 /* set FIS registers */
906 if (hpriv->cap & HOST_CAP_64)
907 writel((pp->cmd_slot_dma >> 16) >> 16,
908 port_mmio + PORT_LST_ADDR_HI);
909 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
910
911 if (hpriv->cap & HOST_CAP_64)
912 writel((pp->rx_fis_dma >> 16) >> 16,
913 port_mmio + PORT_FIS_ADDR_HI);
914 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
915
916 /* enable FIS reception */
917 tmp = readl(port_mmio + PORT_CMD);
918 tmp |= PORT_CMD_FIS_RX;
919 writel(tmp, port_mmio + PORT_CMD);
920
921 /* flush */
922 readl(port_mmio + PORT_CMD);
923 }
924
925 static int ahci_stop_fis_rx(struct ata_port *ap)
926 {
927 void __iomem *port_mmio = ahci_port_base(ap);
928 u32 tmp;
929
930 /* disable FIS reception */
931 tmp = readl(port_mmio + PORT_CMD);
932 tmp &= ~PORT_CMD_FIS_RX;
933 writel(tmp, port_mmio + PORT_CMD);
934
935 /* wait for completion, spec says 500ms, give it 1000 */
936 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
937 PORT_CMD_FIS_ON, 10, 1000);
938 if (tmp & PORT_CMD_FIS_ON)
939 return -EBUSY;
940
941 return 0;
942 }
943
944 static void ahci_power_up(struct ata_port *ap)
945 {
946 struct ahci_host_priv *hpriv = ap->host->private_data;
947 void __iomem *port_mmio = ahci_port_base(ap);
948 u32 cmd;
949
950 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
951
952 /* spin up device */
953 if (hpriv->cap & HOST_CAP_SSS) {
954 cmd |= PORT_CMD_SPIN_UP;
955 writel(cmd, port_mmio + PORT_CMD);
956 }
957
958 /* wake up link */
959 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
960 }
961
962 static void ahci_disable_alpm(struct ata_port *ap)
963 {
964 struct ahci_host_priv *hpriv = ap->host->private_data;
965 void __iomem *port_mmio = ahci_port_base(ap);
966 u32 cmd;
967 struct ahci_port_priv *pp = ap->private_data;
968
969 /* IPM bits should be disabled by libata-core */
970 /* get the existing command bits */
971 cmd = readl(port_mmio + PORT_CMD);
972
973 /* disable ALPM and ASP */
974 cmd &= ~PORT_CMD_ASP;
975 cmd &= ~PORT_CMD_ALPE;
976
977 /* force the interface back to active */
978 cmd |= PORT_CMD_ICC_ACTIVE;
979
980 /* write out new cmd value */
981 writel(cmd, port_mmio + PORT_CMD);
982 cmd = readl(port_mmio + PORT_CMD);
983
984 /* wait 10ms to be sure we've come out of any low power state */
985 msleep(10);
986
987 /* clear out any PhyRdy stuff from interrupt status */
988 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
989
990 /* go ahead and clean out PhyRdy Change from Serror too */
991 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
992
993 /*
994 * Clear the flag that tells us to ignore all PhyRdy
995 * state changes, so hotplug notifications are honoured again
996 */
997 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
998
999 /*
1000 * Enable interrupts on Phy Ready.
1001 */
1002 pp->intr_mask |= PORT_IRQ_PHYRDY;
1003 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1004
1005 /*
1006 * don't change the link pm policy - we can be called
1007 * just to turn off link pm temporarily
1008 */
1009 }
1010
1011 static int ahci_enable_alpm(struct ata_port *ap,
1012 enum link_pm policy)
1013 {
1014 struct ahci_host_priv *hpriv = ap->host->private_data;
1015 void __iomem *port_mmio = ahci_port_base(ap);
1016 u32 cmd;
1017 struct ahci_port_priv *pp = ap->private_data;
1018 u32 asp;
1019
1020 /* Make sure the host is capable of link power management */
1021 if (!(hpriv->cap & HOST_CAP_ALPM))
1022 return -EINVAL;
1023
1024 switch (policy) {
1025 case MAX_PERFORMANCE:
1026 case NOT_AVAILABLE:
1027 /*
1028 * if we came here with NOT_AVAILABLE,
1029 * it just means this is the first time we
1030 * have tried to enable - default to max performance,
1031 * and let the user go to lower power modes on request.
1032 */
1033 ahci_disable_alpm(ap);
1034 return 0;
1035 case MIN_POWER:
1036 /* configure HBA to enter SLUMBER */
1037 asp = PORT_CMD_ASP;
1038 break;
1039 case MEDIUM_POWER:
1040 /* configure HBA to enter PARTIAL */
1041 asp = 0;
1042 break;
1043 default:
1044 return -EINVAL;
1045 }
1046
1047 /*
1048 * Disable interrupts on Phy Ready. This keeps us from
1049 * getting woken up due to spurious phy ready interrupts
1050 * TBD - Hot plug should be done via polling now, is
1051 * that even supported?
1052 */
1053 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1054 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1055
1056 /*
1057 * Set a flag to indicate that we should ignore all PhyRdy
1058 * state changes since these can happen now whenever we
1059 * change link state
1060 */
1061 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1062
1063 /* get the existing command bits */
1064 cmd = readl(port_mmio + PORT_CMD);
1065
1066 /*
1067 * Set ASP based on Policy
1068 */
1069 cmd |= asp;
1070
1071 /*
1072 * Setting this bit will instruct the HBA to aggressively
1073 * enter a lower power link state when it's appropriate and
1074 * based on the value set above for ASP
1075 */
1076 cmd |= PORT_CMD_ALPE;
1077
1078 /* write out new cmd value */
1079 writel(cmd, port_mmio + PORT_CMD);
1080 cmd = readl(port_mmio + PORT_CMD);
1081
1082 /* IPM bits should be set by libata-core */
1083 return 0;
1084 }
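
/*
 * Summary of the policy mapping above:
 *
 *   MIN_POWER       -> PORT_CMD_ALPE | PORT_CMD_ASP  (aggressively enter Slumber)
 *   MEDIUM_POWER    -> PORT_CMD_ALPE                 (aggressively enter Partial)
 *   MAX_PERFORMANCE,
 *   NOT_AVAILABLE   -> ALPM left disabled via ahci_disable_alpm()
 */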
1085
1086 #ifdef CONFIG_PM
1087 static void ahci_power_down(struct ata_port *ap)
1088 {
1089 struct ahci_host_priv *hpriv = ap->host->private_data;
1090 void __iomem *port_mmio = ahci_port_base(ap);
1091 u32 cmd, scontrol;
1092
1093 if (!(hpriv->cap & HOST_CAP_SSS))
1094 return;
1095
1096 /* put device into listen mode, first set PxSCTL.DET to 0 */
1097 scontrol = readl(port_mmio + PORT_SCR_CTL);
1098 scontrol &= ~0xf;
1099 writel(scontrol, port_mmio + PORT_SCR_CTL);
1100
1101 /* then set PxCMD.SUD to 0 */
1102 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1103 cmd &= ~PORT_CMD_SPIN_UP;
1104 writel(cmd, port_mmio + PORT_CMD);
1105 }
1106 #endif
1107
1108 static void ahci_start_port(struct ata_port *ap)
1109 {
1110 struct ahci_port_priv *pp = ap->private_data;
1111 struct ata_link *link;
1112 struct ahci_em_priv *emp;
1113
1114 /* enable FIS reception */
1115 ahci_start_fis_rx(ap);
1116
1117 /* enable DMA */
1118 ahci_start_engine(ap);
1119
1120 /* turn on LEDs */
1121 if (ap->flags & ATA_FLAG_EM) {
1122 ata_for_each_link(link, ap, EDGE) {
1123 emp = &pp->em_priv[link->pmp];
1124 ahci_transmit_led_message(ap, emp->led_state, 4);
1125 }
1126 }
1127
1128 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1129 ata_for_each_link(link, ap, EDGE)
1130 ahci_init_sw_activity(link);
1131
1132 }
1133
1134 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1135 {
1136 int rc;
1137
1138 /* disable DMA */
1139 rc = ahci_stop_engine(ap);
1140 if (rc) {
1141 *emsg = "failed to stop engine";
1142 return rc;
1143 }
1144
1145 /* disable FIS reception */
1146 rc = ahci_stop_fis_rx(ap);
1147 if (rc) {
1148 *emsg = "failed stop FIS RX";
1149 return rc;
1150 }
1151
1152 return 0;
1153 }
1154
1155 static int ahci_reset_controller(struct ata_host *host)
1156 {
1157 struct pci_dev *pdev = to_pci_dev(host->dev);
1158 struct ahci_host_priv *hpriv = host->private_data;
1159 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1160 u32 tmp;
1161
1162 /* we must be in AHCI mode, before using anything
1163 * AHCI-specific, such as HOST_RESET.
1164 */
1165 ahci_enable_ahci(mmio);
1166
1167 /* global controller reset */
1168 if (!ahci_skip_host_reset) {
1169 tmp = readl(mmio + HOST_CTL);
1170 if ((tmp & HOST_RESET) == 0) {
1171 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1172 readl(mmio + HOST_CTL); /* flush */
1173 }
1174
1175 /*
1176 * To perform a host reset, the OS sets HOST_RESET
1177 * and polls until the bit reads back as "0".
1178 * The reset must complete within 1 second, or
1179 * the hardware should be considered fried.
1180 */
1181 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1182 HOST_RESET, 10, 1000);
1183
1184 if (tmp & HOST_RESET) {
1185 dev_printk(KERN_ERR, host->dev,
1186 "controller reset failed (0x%x)\n", tmp);
1187 return -EIO;
1188 }
1189
1190 /* turn on AHCI mode */
1191 ahci_enable_ahci(mmio);
1192
1193 /* Some registers might be cleared on reset. Restore
1194 * initial values.
1195 */
1196 ahci_restore_initial_config(host);
1197 } else
1198 dev_printk(KERN_INFO, host->dev,
1199 "skipping global host reset\n");
1200
1201 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1202 u16 tmp16;
1203
1204 /* configure PCS */
1205 pci_read_config_word(pdev, 0x92, &tmp16);
1206 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1207 tmp16 |= hpriv->port_map;
1208 pci_write_config_word(pdev, 0x92, tmp16);
1209 }
1210 }
1211
1212 return 0;
1213 }
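
/*
 * Example of the PCS fixup above: on an Intel controller with
 * hpriv->port_map = 0x3 and a PCS value (PCI config offset 0x92) of
 * 0x0001, the check (0x0001 & 0x3) != 0x3 fails, so the value is
 * rewritten as 0x0003 to set the port-enable bits for both implemented
 * ports.
 */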
1214
1215 static void ahci_sw_activity(struct ata_link *link)
1216 {
1217 struct ata_port *ap = link->ap;
1218 struct ahci_port_priv *pp = ap->private_data;
1219 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1220
1221 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1222 return;
1223
1224 emp->activity++;
1225 if (!timer_pending(&emp->timer))
1226 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1227 }
1228
1229 static void ahci_sw_activity_blink(unsigned long arg)
1230 {
1231 struct ata_link *link = (struct ata_link *)arg;
1232 struct ata_port *ap = link->ap;
1233 struct ahci_port_priv *pp = ap->private_data;
1234 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1235 unsigned long led_message = emp->led_state;
1236 u32 activity_led_state;
1237 unsigned long flags;
1238
1239 led_message &= EM_MSG_LED_VALUE;
1240 led_message |= ap->port_no | (link->pmp << 8);
1241
1242 /* check to see if we've had activity. If so,
1243 * toggle state of LED and reset timer. If not,
1244 * turn LED to desired idle state.
1245 */
1246 spin_lock_irqsave(ap->lock, flags);
1247 if (emp->saved_activity != emp->activity) {
1248 emp->saved_activity = emp->activity;
1249 /* get the current LED state */
1250 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1251
1252 if (activity_led_state)
1253 activity_led_state = 0;
1254 else
1255 activity_led_state = 1;
1256
1257 /* clear old state */
1258 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1259
1260 /* toggle state */
1261 led_message |= (activity_led_state << 16);
1262 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1263 } else {
1264 /* switch to idle */
1265 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1266 if (emp->blink_policy == BLINK_OFF)
1267 led_message |= (1 << 16);
1268 }
1269 spin_unlock_irqrestore(ap->lock, flags);
1270 ahci_transmit_led_message(ap, led_message, 4);
1271 }
1272
1273 static void ahci_init_sw_activity(struct ata_link *link)
1274 {
1275 struct ata_port *ap = link->ap;
1276 struct ahci_port_priv *pp = ap->private_data;
1277 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1278
1279 /* init activity stats, setup timer */
1280 emp->saved_activity = emp->activity = 0;
1281 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1282
1283 /* check our blink policy and set flag for link if it's enabled */
1284 if (emp->blink_policy)
1285 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1286 }
1287
1288 static int ahci_reset_em(struct ata_host *host)
1289 {
1290 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1291 u32 em_ctl;
1292
1293 em_ctl = readl(mmio + HOST_EM_CTL);
1294 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1295 return -EINVAL;
1296
1297 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1298 return 0;
1299 }
1300
1301 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1302 ssize_t size)
1303 {
1304 struct ahci_host_priv *hpriv = ap->host->private_data;
1305 struct ahci_port_priv *pp = ap->private_data;
1306 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1307 u32 em_ctl;
1308 u32 message[] = {0, 0};
1309 unsigned long flags;
1310 int pmp;
1311 struct ahci_em_priv *emp;
1312
1313 /* get the slot number from the message */
1314 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1315 if (pmp < MAX_SLOTS)
1316 emp = &pp->em_priv[pmp];
1317 else
1318 return -EINVAL;
1319
1320 spin_lock_irqsave(ap->lock, flags);
1321
1322 /*
1323 * if we are still busy transmitting a previous message,
1324 * do not allow a new one to be queued
1325 */
1326 em_ctl = readl(mmio + HOST_EM_CTL);
1327 if (em_ctl & EM_CTL_TM) {
1328 spin_unlock_irqrestore(ap->lock, flags);
1329 return -EINVAL;
1330 }
1331
1332 /*
1333 * create message header - this is all zero except for
1334 * the message size, which is 4 bytes.
1335 */
1336 message[0] |= (4 << 8);
1337
1338 /* ignore 0:4 of byte zero, fill in port info yourself */
1339 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1340
1341 /* write message to EM_LOC */
1342 writel(message[0], mmio + hpriv->em_loc);
1343 writel(message[1], mmio + hpriv->em_loc+4);
1344
1345 /* save off new led state for port/slot */
1346 emp->led_state = message[1];
1347
1348 /*
1349 * tell hardware to transmit the message
1350 */
1351 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1352
1353 spin_unlock_irqrestore(ap->lock, flags);
1354 return size;
1355 }
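
/*
 * Example of the two dwords written to the EM transmit buffer above,
 * for HBA port 2, PMP slot 0, LED value EM_MSG_LED_VALUE_ON:
 *
 *   message[0] = 0x00000400   header: message size 4, all else zero
 *   message[1] = 0x00010002   bits 0-3 HBA port, 8-15 PMP slot,
 *                             16-31 LED value
 *
 * Writing EM_CTL_TM afterwards tells the HBA to transmit the message;
 * the HBA clears the bit again once transmission completes, which is
 * what the busy check at the top of this function relies on.
 */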
1356
1357 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1358 {
1359 struct ahci_port_priv *pp = ap->private_data;
1360 struct ata_link *link;
1361 struct ahci_em_priv *emp;
1362 int rc = 0;
1363
1364 ata_for_each_link(link, ap, EDGE) {
1365 emp = &pp->em_priv[link->pmp];
1366 rc += sprintf(buf, "%lx\n", emp->led_state);
1367 }
1368 return rc;
1369 }
1370
1371 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1372 size_t size)
1373 {
1374 int state;
1375 int pmp;
1376 struct ahci_port_priv *pp = ap->private_data;
1377 struct ahci_em_priv *emp;
1378
1379 state = simple_strtoul(buf, NULL, 0);
1380
1381 /* get the slot number from the message */
1382 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1383 if (pmp < MAX_SLOTS)
1384 emp = &pp->em_priv[pmp];
1385 else
1386 return -EINVAL;
1387
1388 /* mask off the activity bits if we are in sw_activity
1389 * mode, user should turn off sw_activity before setting
1390 * activity led through em_message
1391 */
1392 if (emp->blink_policy)
1393 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1394
1395 return ahci_transmit_led_message(ap, state, size);
1396 }
1397
1398 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1399 {
1400 struct ata_link *link = dev->link;
1401 struct ata_port *ap = link->ap;
1402 struct ahci_port_priv *pp = ap->private_data;
1403 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1404 u32 port_led_state = emp->led_state;
1405
1406 /* save the desired Activity LED behavior */
1407 if (val == OFF) {
1408 /* clear LFLAG */
1409 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1410
1411 /* set the LED to OFF */
1412 port_led_state &= EM_MSG_LED_VALUE_OFF;
1413 port_led_state |= (ap->port_no | (link->pmp << 8));
1414 ahci_transmit_led_message(ap, port_led_state, 4);
1415 } else {
1416 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1417 if (val == BLINK_OFF) {
1418 /* set LED to ON for idle */
1419 port_led_state &= EM_MSG_LED_VALUE_OFF;
1420 port_led_state |= (ap->port_no | (link->pmp << 8));
1421 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1422 ahci_transmit_led_message(ap, port_led_state, 4);
1423 }
1424 }
1425 emp->blink_policy = val;
1426 return 0;
1427 }
1428
1429 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1430 {
1431 struct ata_link *link = dev->link;
1432 struct ata_port *ap = link->ap;
1433 struct ahci_port_priv *pp = ap->private_data;
1434 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1435
1436 /* display the saved value of activity behavior for this
1437 * disk.
1438 */
1439 return sprintf(buf, "%d\n", emp->blink_policy);
1440 }
1441
1442 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1443 int port_no, void __iomem *mmio,
1444 void __iomem *port_mmio)
1445 {
1446 const char *emsg = NULL;
1447 int rc;
1448 u32 tmp;
1449
1450 /* make sure port is not active */
1451 rc = ahci_deinit_port(ap, &emsg);
1452 if (rc)
1453 dev_printk(KERN_WARNING, &pdev->dev,
1454 "%s (%d)\n", emsg, rc);
1455
1456 /* clear SError */
1457 tmp = readl(port_mmio + PORT_SCR_ERR);
1458 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1459 writel(tmp, port_mmio + PORT_SCR_ERR);
1460
1461 /* clear port IRQ */
1462 tmp = readl(port_mmio + PORT_IRQ_STAT);
1463 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1464 if (tmp)
1465 writel(tmp, port_mmio + PORT_IRQ_STAT);
1466
1467 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1468 }
1469
1470 static void ahci_init_controller(struct ata_host *host)
1471 {
1472 struct ahci_host_priv *hpriv = host->private_data;
1473 struct pci_dev *pdev = to_pci_dev(host->dev);
1474 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1475 int i;
1476 void __iomem *port_mmio;
1477 u32 tmp;
1478 int mv;
1479
1480 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1481 if (pdev->device == 0x6121)
1482 mv = 2;
1483 else
1484 mv = 4;
1485 port_mmio = __ahci_port_base(host, mv);
1486
1487 writel(0, port_mmio + PORT_IRQ_MASK);
1488
1489 /* clear port IRQ */
1490 tmp = readl(port_mmio + PORT_IRQ_STAT);
1491 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1492 if (tmp)
1493 writel(tmp, port_mmio + PORT_IRQ_STAT);
1494 }
1495
1496 for (i = 0; i < host->n_ports; i++) {
1497 struct ata_port *ap = host->ports[i];
1498
1499 port_mmio = ahci_port_base(ap);
1500 if (ata_port_is_dummy(ap))
1501 continue;
1502
1503 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1504 }
1505
1506 tmp = readl(mmio + HOST_CTL);
1507 VPRINTK("HOST_CTL 0x%x\n", tmp);
1508 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1509 tmp = readl(mmio + HOST_CTL);
1510 VPRINTK("HOST_CTL 0x%x\n", tmp);
1511 }
1512
1513 static void ahci_dev_config(struct ata_device *dev)
1514 {
1515 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1516
1517 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1518 dev->max_sectors = 255;
1519 ata_dev_printk(dev, KERN_INFO,
1520 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1521 }
1522 }
1523
1524 static unsigned int ahci_dev_classify(struct ata_port *ap)
1525 {
1526 void __iomem *port_mmio = ahci_port_base(ap);
1527 struct ata_taskfile tf;
1528 u32 tmp;
1529
1530 tmp = readl(port_mmio + PORT_SIG);
1531 tf.lbah = (tmp >> 24) & 0xff;
1532 tf.lbam = (tmp >> 16) & 0xff;
1533 tf.lbal = (tmp >> 8) & 0xff;
1534 tf.nsect = (tmp) & 0xff;
1535
1536 return ata_dev_classify(&tf);
1537 }
1538
1539 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1540 u32 opts)
1541 {
1542 dma_addr_t cmd_tbl_dma;
1543
1544 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1545
1546 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1547 pp->cmd_slot[tag].status = 0;
1548 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1549 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1550 }
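
/*
 * Each command slot points at its own command table, laid out back to
 * back in one DMA allocation: tag 5, for example, uses the table at
 * pp->cmd_tbl_dma + 5 * AHCI_CMD_TBL_SZ (5 * 2816 bytes into the area).
 * The "(cmd_tbl_dma >> 16) >> 16" idiom extracts the upper 32 bits while
 * remaining well-defined when dma_addr_t is only 32 bits wide, where a
 * plain ">> 32" would be undefined behaviour.
 */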
1551
1552 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1553 {
1554 void __iomem *port_mmio = ahci_port_base(ap);
1555 struct ahci_host_priv *hpriv = ap->host->private_data;
1556 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1557 u32 tmp;
1558 int busy, rc;
1559
1560 /* do we need to kick the port? */
1561 busy = status & (ATA_BUSY | ATA_DRQ);
1562 if (!busy && !force_restart)
1563 return 0;
1564
1565 /* stop engine */
1566 rc = ahci_stop_engine(ap);
1567 if (rc)
1568 goto out_restart;
1569
1570 /* need to do CLO? */
1571 if (!busy) {
1572 rc = 0;
1573 goto out_restart;
1574 }
1575
1576 if (!(hpriv->cap & HOST_CAP_CLO)) {
1577 rc = -EOPNOTSUPP;
1578 goto out_restart;
1579 }
1580
1581 /* perform CLO */
1582 tmp = readl(port_mmio + PORT_CMD);
1583 tmp |= PORT_CMD_CLO;
1584 writel(tmp, port_mmio + PORT_CMD);
1585
1586 rc = 0;
1587 tmp = ata_wait_register(port_mmio + PORT_CMD,
1588 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1589 if (tmp & PORT_CMD_CLO)
1590 rc = -EIO;
1591
1592 /* restart engine */
1593 out_restart:
1594 ahci_start_engine(ap);
1595 return rc;
1596 }
1597
1598 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1599 struct ata_taskfile *tf, int is_cmd, u16 flags,
1600 unsigned long timeout_msec)
1601 {
1602 const u32 cmd_fis_len = 5; /* five dwords */
1603 struct ahci_port_priv *pp = ap->private_data;
1604 void __iomem *port_mmio = ahci_port_base(ap);
1605 u8 *fis = pp->cmd_tbl;
1606 u32 tmp;
1607
1608 /* prep the command */
1609 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1610 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1611
1612 /* issue & wait */
1613 writel(1, port_mmio + PORT_CMD_ISSUE);
1614
1615 if (timeout_msec) {
1616 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1617 1, timeout_msec);
1618 if (tmp & 0x1) {
1619 ahci_kick_engine(ap, 1);
1620 return -EBUSY;
1621 }
1622 } else
1623 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1624
1625 return 0;
1626 }
1627
1628 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1629 int pmp, unsigned long deadline,
1630 int (*check_ready)(struct ata_link *link))
1631 {
1632 struct ata_port *ap = link->ap;
1633 const char *reason = NULL;
1634 unsigned long now, msecs;
1635 struct ata_taskfile tf;
1636 int rc;
1637
1638 DPRINTK("ENTER\n");
1639
1640 /* prepare for SRST (AHCI-1.1 10.4.1) */
1641 rc = ahci_kick_engine(ap, 1);
1642 if (rc && rc != -EOPNOTSUPP)
1643 ata_link_printk(link, KERN_WARNING,
1644 "failed to reset engine (errno=%d)\n", rc);
1645
1646 ata_tf_init(link->device, &tf);
1647
1648 /* issue the first D2H Register FIS */
1649 msecs = 0;
1650 now = jiffies;
1651 if (time_after(deadline, now))
1652 msecs = jiffies_to_msecs(deadline - now);
1653
1654 tf.ctl |= ATA_SRST;
1655 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1656 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1657 rc = -EIO;
1658 reason = "1st FIS failed";
1659 goto fail;
1660 }
1661
1662 /* spec says at least 5us, but be generous and sleep for 1ms */
1663 msleep(1);
1664
1665 /* issue the second D2H Register FIS */
1666 tf.ctl &= ~ATA_SRST;
1667 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1668
1669 /* wait for link to become ready */
1670 rc = ata_wait_after_reset(link, deadline, check_ready);
1671 /* link occupied, -ENODEV too is an error */
1672 if (rc) {
1673 reason = "device not ready";
1674 goto fail;
1675 }
1676 *class = ahci_dev_classify(ap);
1677
1678 DPRINTK("EXIT, class=%u\n", *class);
1679 return 0;
1680
1681 fail:
1682 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1683 return rc;
1684 }
1685
1686 static int ahci_check_ready(struct ata_link *link)
1687 {
1688 void __iomem *port_mmio = ahci_port_base(link->ap);
1689 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1690
1691 return ata_check_ready(status);
1692 }
1693
1694 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1695 unsigned long deadline)
1696 {
1697 int pmp = sata_srst_pmp(link);
1698
1699 DPRINTK("ENTER\n");
1700
1701 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1702 }
1703
1704 static int ahci_sb600_check_ready(struct ata_link *link)
1705 {
1706 void __iomem *port_mmio = ahci_port_base(link->ap);
1707 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1708 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1709
1710 /*
1711 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
1712 * which can save timeout delay.
1713 */
1714 if (irq_status & PORT_IRQ_BAD_PMP)
1715 return -EIO;
1716
1717 return ata_check_ready(status);
1718 }
1719
1720 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1721 unsigned long deadline)
1722 {
1723 struct ata_port *ap = link->ap;
1724 void __iomem *port_mmio = ahci_port_base(ap);
1725 int pmp = sata_srst_pmp(link);
1726 int rc;
1727 u32 irq_sts;
1728
1729 DPRINTK("ENTER\n");
1730
1731 rc = ahci_do_softreset(link, class, pmp, deadline,
1732 ahci_sb600_check_ready);
1733
1734 /*
1735 * Soft reset fails on some ATI chips with IPMS set when PMP
1736 * is enabled but a SATA HDD/ODD is connected to a SATA port;
1737 * in that case, retry the soft reset with pmp 0.
1738 */
1739 if (rc == -EIO) {
1740 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1741 if (irq_sts & PORT_IRQ_BAD_PMP) {
1742 ata_link_printk(link, KERN_WARNING,
1743 "failed due to HW bug, retry pmp=0\n");
1744 rc = ahci_do_softreset(link, class, 0, deadline,
1745 ahci_check_ready);
1746 }
1747 }
1748
1749 return rc;
1750 }
1751
1752 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1753 unsigned long deadline)
1754 {
1755 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1756 struct ata_port *ap = link->ap;
1757 struct ahci_port_priv *pp = ap->private_data;
1758 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1759 struct ata_taskfile tf;
1760 bool online;
1761 int rc;
1762
1763 DPRINTK("ENTER\n");
1764
1765 ahci_stop_engine(ap);
1766
1767 /* clear D2H reception area to properly wait for D2H FIS */
1768 ata_tf_init(link->device, &tf);
1769 tf.command = 0x80;
1770 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1771
1772 rc = sata_link_hardreset(link, timing, deadline, &online,
1773 ahci_check_ready);
1774
1775 ahci_start_engine(ap);
1776
1777 if (online)
1778 *class = ahci_dev_classify(ap);
1779
1780 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1781 return rc;
1782 }
1783
1784 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1785 unsigned long deadline)
1786 {
1787 struct ata_port *ap = link->ap;
1788 bool online;
1789 int rc;
1790
1791 DPRINTK("ENTER\n");
1792
1793 ahci_stop_engine(ap);
1794
1795 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1796 deadline, &online, NULL);
1797
1798 ahci_start_engine(ap);
1799
1800 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1801
1802 /* vt8251 doesn't clear BSY on signature FIS reception,
1803 * request follow-up softreset.
1804 */
1805 return online ? -EAGAIN : rc;
1806 }
1807
1808 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1809 unsigned long deadline)
1810 {
1811 struct ata_port *ap = link->ap;
1812 struct ahci_port_priv *pp = ap->private_data;
1813 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1814 struct ata_taskfile tf;
1815 bool online;
1816 int rc;
1817
1818 ahci_stop_engine(ap);
1819
1820 /* clear D2H reception area to properly wait for D2H FIS */
1821 ata_tf_init(link->device, &tf);
1822 tf.command = 0x80;
1823 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1824
1825 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1826 deadline, &online, NULL);
1827
1828 ahci_start_engine(ap);
1829
1830 /* The pseudo configuration device on SIMG4726 attached to
1831 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1832 * hardreset if no device is attached to the first downstream
1833 * port && the pseudo device locks up on SRST w/ PMP==0. To
1834 * work around this, wait for !BSY only briefly. If BSY isn't
1835 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1836 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1837 *
1838 * Wait for two seconds. Devices attached to the downstream
1839 * port which can't process the following IDENTIFY after this
1840 * will have to be reset again. In most cases this should
1841 * suffice while keeping probing reasonably snappy.
1842 */
1843 if (online) {
1844 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1845 ahci_check_ready);
1846 if (rc)
1847 ahci_kick_engine(ap, 0);
1848 }
1849 return rc;
1850 }
1851
1852 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1853 {
1854 struct ata_port *ap = link->ap;
1855 void __iomem *port_mmio = ahci_port_base(ap);
1856 u32 new_tmp, tmp;
1857
1858 ata_std_postreset(link, class);
1859
1860 /* Make sure port's ATAPI bit is set appropriately */
1861 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1862 if (*class == ATA_DEV_ATAPI)
1863 new_tmp |= PORT_CMD_ATAPI;
1864 else
1865 new_tmp &= ~PORT_CMD_ATAPI;
1866 if (new_tmp != tmp) {
1867 writel(new_tmp, port_mmio + PORT_CMD);
1868 readl(port_mmio + PORT_CMD); /* flush */
1869 }
1870 }
1871
1872 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1873 {
1874 struct scatterlist *sg;
1875 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1876 unsigned int si;
1877
1878 VPRINTK("ENTER\n");
1879
1880 /*
1881 * Next, the S/G list.
1882 */
1883 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1884 dma_addr_t addr = sg_dma_address(sg);
1885 u32 sg_len = sg_dma_len(sg);
1886
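/*
 * Each PRDT entry takes the DMA address split into two 32-bit
 * halves plus the byte count minus one.  The two 16-bit shifts
 * for addr_hi avoid an invalid 32-bit shift when dma_addr_t is
 * only 32 bits wide.
 */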
1887 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1888 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1889 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1890 }
1891
1892 return si;
1893 }
1894
1895 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1896 {
1897 struct ata_port *ap = qc->ap;
1898 struct ahci_port_priv *pp = ap->private_data;
1899 int is_atapi = ata_is_atapi(qc->tf.protocol);
1900 void *cmd_tbl;
1901 u32 opts;
1902 const u32 cmd_fis_len = 5; /* five dwords */
1903 unsigned int n_elem;
1904
1905 /*
1906 * Fill in command table information. First, the header,
1907 * a SATA Register - Host to Device command FIS.
1908 */
1909 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1910
1911 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1912 if (is_atapi) {
1913 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1914 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1915 }
1916
1917 n_elem = 0;
1918 if (qc->flags & ATA_QCFLAG_DMAMAP)
1919 n_elem = ahci_fill_sg(qc, cmd_tbl);
1920
1921 /*
1922 * Fill in command slot information.
1923 */
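/*
 * opts becomes DW0 of the command header: command FIS length in
 * DWORDs in bits 4:0, the PMP port number in bits 15:12 and the
 * PRDT entry count in bits 31:16, plus the ATAPI/Write/Prefetch
 * flag bits set below.
 */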
1924 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1925 if (qc->tf.flags & ATA_TFLAG_WRITE)
1926 opts |= AHCI_CMD_WRITE;
1927 if (is_atapi)
1928 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1929
1930 ahci_fill_cmd_slot(pp, qc->tag, opts);
1931 }
1932
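/*
 * Error interrupt handler: translate the PORT_IRQ_* error bits into
 * libata error-handling actions on the active link, then either
 * freeze the port (for freezing conditions) or abort the outstanding
 * commands.
 */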
1933 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1934 {
1935 struct ahci_host_priv *hpriv = ap->host->private_data;
1936 struct ahci_port_priv *pp = ap->private_data;
1937 struct ata_eh_info *host_ehi = &ap->link.eh_info;
1938 struct ata_link *link = NULL;
1939 struct ata_queued_cmd *active_qc;
1940 struct ata_eh_info *active_ehi;
1941 u32 serror;
1942
1943 /* determine active link */
1944 ata_for_each_link(link, ap, EDGE)
1945 if (ata_link_active(link))
1946 break;
1947 if (!link)
1948 link = &ap->link;
1949
1950 active_qc = ata_qc_from_tag(ap, link->active_tag);
1951 active_ehi = &link->eh_info;
1952
1953 /* record irq stat */
1954 ata_ehi_clear_desc(host_ehi);
1955 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1956
1957 /* AHCI needs SError cleared; otherwise, it might lock up */
1958 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
1959 ahci_scr_write(&ap->link, SCR_ERROR, serror);
1960 host_ehi->serror |= serror;
1961
1962 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1963 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1964 irq_stat &= ~PORT_IRQ_IF_ERR;
1965
1966 if (irq_stat & PORT_IRQ_TF_ERR) {
1967 /* If qc is active, charge it; otherwise, the active
1968 * link. There's no active qc on NCQ errors. It will
1969 * be determined by EH by reading log page 10h.
1970 */
1971 if (active_qc)
1972 active_qc->err_mask |= AC_ERR_DEV;
1973 else
1974 active_ehi->err_mask |= AC_ERR_DEV;
1975
1976 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
1977 host_ehi->serror &= ~SERR_INTERNAL;
1978 }
1979
1980 if (irq_stat & PORT_IRQ_UNK_FIS) {
1981 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1982
1983 active_ehi->err_mask |= AC_ERR_HSM;
1984 active_ehi->action |= ATA_EH_RESET;
1985 ata_ehi_push_desc(active_ehi,
1986 "unknown FIS %08x %08x %08x %08x" ,
1987 unk[0], unk[1], unk[2], unk[3]);
1988 }
1989
1990 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
1991 active_ehi->err_mask |= AC_ERR_HSM;
1992 active_ehi->action |= ATA_EH_RESET;
1993 ata_ehi_push_desc(active_ehi, "incorrect PMP");
1994 }
1995
1996 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
1997 host_ehi->err_mask |= AC_ERR_HOST_BUS;
1998 host_ehi->action |= ATA_EH_RESET;
1999 ata_ehi_push_desc(host_ehi, "host bus error");
2000 }
2001
2002 if (irq_stat & PORT_IRQ_IF_ERR) {
2003 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2004 host_ehi->action |= ATA_EH_RESET;
2005 ata_ehi_push_desc(host_ehi, "interface fatal error");
2006 }
2007
2008 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2009 ata_ehi_hotplugged(host_ehi);
2010 ata_ehi_push_desc(host_ehi, "%s",
2011 irq_stat & PORT_IRQ_CONNECT ?
2012 "connection status changed" : "PHY RDY changed");
2013 }
2014
2015 /* okay, let's hand over to EH */
2016
2017 if (irq_stat & PORT_IRQ_FREEZE)
2018 ata_port_freeze(ap);
2019 else
2020 ata_port_abort(ap);
2021 }
2022
2023 static void ahci_port_intr(struct ata_port *ap)
2024 {
2025 void __iomem *port_mmio = ahci_port_base(ap);
2026 struct ata_eh_info *ehi = &ap->link.eh_info;
2027 struct ahci_port_priv *pp = ap->private_data;
2028 struct ahci_host_priv *hpriv = ap->host->private_data;
2029 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2030 u32 status, qc_active;
2031 int rc;
2032
2033 status = readl(port_mmio + PORT_IRQ_STAT);
2034 writel(status, port_mmio + PORT_IRQ_STAT);
2035
2036 /* ignore BAD_PMP while resetting */
2037 if (unlikely(resetting))
2038 status &= ~PORT_IRQ_BAD_PMP;
2039
2040 /* If we are getting PhyRdy, this is just
2041 * a power state change; clear out this
2042 * interrupt, plus the PhyRdy/Comm Wake
2043 * bits from SError.
2044 */
2045 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2046 (status & PORT_IRQ_PHYRDY)) {
2047 status &= ~PORT_IRQ_PHYRDY;
2048 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2049 }
2050
2051 if (unlikely(status & PORT_IRQ_ERROR)) {
2052 ahci_error_intr(ap, status);
2053 return;
2054 }
2055
2056 if (status & PORT_IRQ_SDB_FIS) {
2057 /* If SNotification is available, leave notification
2058 * handling to sata_async_notification(). If not,
2059 * emulate it by snooping the SDB FIS RX area.
2060 *
2061 * Snooping the FIS RX area is probably cheaper than
2062 * poking SNotification, but some controllers which
2063 * implement SNotification, ICH9 for example, don't
2064 * store the AN SDB FIS into the receive area.
2065 */
2066 if (hpriv->cap & HOST_CAP_SNTF)
2067 sata_async_notification(ap);
2068 else {
2069 /* If the 'N' bit in word 0 of the FIS is set,
2070 * we just received asynchronous notification.
2071 * Tell libata about it.
2072 */
2073 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2074 u32 f0 = le32_to_cpu(f[0]);
2075
2076 if (f0 & (1 << 15))
2077 sata_async_notification(ap);
2078 }
2079 }
2080
2081 /* pp->active_link is valid iff any command is in flight */
2082 if (ap->qc_active && pp->active_link->sactive)
2083 qc_active = readl(port_mmio + PORT_SCR_ACT);
2084 else
2085 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2086
2087 rc = ata_qc_complete_multiple(ap, qc_active);
2088
2089 /* while resetting, invalid completions are expected */
2090 if (unlikely(rc < 0 && !resetting)) {
2091 ehi->err_mask |= AC_ERR_HSM;
2092 ehi->action |= ATA_EH_RESET;
2093 ata_port_freeze(ap);
2094 }
2095 }
2096
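/*
 * Host-level interrupt handler: each bit in HOST_IRQ_STAT corresponds
 * to one port.  Run ahci_port_intr() for every implemented port whose
 * bit is set, then acknowledge HOST_IRQ_STAT once at the end (see the
 * comment above the final writel() below).
 */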
2097 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2098 {
2099 struct ata_host *host = dev_instance;
2100 struct ahci_host_priv *hpriv;
2101 unsigned int i, handled = 0;
2102 void __iomem *mmio;
2103 u32 irq_stat, irq_masked;
2104
2105 VPRINTK("ENTER\n");
2106
2107 hpriv = host->private_data;
2108 mmio = host->iomap[AHCI_PCI_BAR];
2109
2110 /* sigh. 0xffffffff is a valid return from h/w */
2111 irq_stat = readl(mmio + HOST_IRQ_STAT);
2112 if (!irq_stat)
2113 return IRQ_NONE;
2114
2115 irq_masked = irq_stat & hpriv->port_map;
2116
2117 spin_lock(&host->lock);
2118
2119 for (i = 0; i < host->n_ports; i++) {
2120 struct ata_port *ap;
2121
2122 if (!(irq_masked & (1 << i)))
2123 continue;
2124
2125 ap = host->ports[i];
2126 if (ap) {
2127 ahci_port_intr(ap);
2128 VPRINTK("port %u\n", i);
2129 } else {
2130 VPRINTK("port %u (no irq)\n", i);
2131 if (ata_ratelimit())
2132 dev_printk(KERN_WARNING, host->dev,
2133 "interrupt on disabled port %u\n", i);
2134 }
2135
2136 handled = 1;
2137 }
2138
2139 /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2140 * it should be cleared after all the port events are cleared;
2141 * otherwise, it will raise a spurious interrupt after each
2142 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2143 * information.
2144 *
2145 * Also, use the unmasked value to clear the interrupt, as a spurious
2146 * pending event on a dummy port might cause a screaming IRQ.
2147 */
2148 writel(irq_stat, mmio + HOST_IRQ_STAT);
2149
2150 spin_unlock(&host->lock);
2151
2152 VPRINTK("EXIT\n");
2153
2154 return IRQ_RETVAL(handled);
2155 }
2156
2157 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2158 {
2159 struct ata_port *ap = qc->ap;
2160 void __iomem *port_mmio = ahci_port_base(ap);
2161 struct ahci_port_priv *pp = ap->private_data;
2162
2163 /* Keep track of the currently active link. It will be used
2164 * in the completion path to determine whether an NCQ phase is in
2165 * progress.
2166 */
2167 pp->active_link = qc->dev->link;
2168
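/* For NCQ commands the tag must be marked in SActive (PORT_SCR_ACT)
 * before the same bit is written to PORT_CMD_ISSUE; both bits clear
 * again as the command progresses and completes.
 */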
2169 if (qc->tf.protocol == ATA_PROT_NCQ)
2170 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2171 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2172
2173 ahci_sw_activity(qc->dev->link);
2174
2175 return 0;
2176 }
2177
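/*
 * The HBA copies every received D2H Register FIS into the port's
 * RX FIS area; rebuild the result taskfile from that copy.
 */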
2178 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2179 {
2180 struct ahci_port_priv *pp = qc->ap->private_data;
2181 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2182
2183 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2184 return true;
2185 }
2186
2187 static void ahci_freeze(struct ata_port *ap)
2188 {
2189 void __iomem *port_mmio = ahci_port_base(ap);
2190
2191 /* turn IRQ off */
2192 writel(0, port_mmio + PORT_IRQ_MASK);
2193 }
2194
2195 static void ahci_thaw(struct ata_port *ap)
2196 {
2197 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2198 void __iomem *port_mmio = ahci_port_base(ap);
2199 u32 tmp;
2200 struct ahci_port_priv *pp = ap->private_data;
2201
2202 /* clear IRQ */
2203 tmp = readl(port_mmio + PORT_IRQ_STAT);
2204 writel(tmp, port_mmio + PORT_IRQ_STAT);
2205 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2206
2207 /* turn IRQ back on */
2208 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2209 }
2210
2211 static void ahci_error_handler(struct ata_port *ap)
2212 {
2213 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2214 /* restart engine */
2215 ahci_stop_engine(ap);
2216 ahci_start_engine(ap);
2217 }
2218
2219 sata_pmp_error_handler(ap);
2220 }
2221
2222 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2223 {
2224 struct ata_port *ap = qc->ap;
2225
2226 /* make DMA engine forget about the failed command */
2227 if (qc->flags & ATA_QCFLAG_FAILED)
2228 ahci_kick_engine(ap, 1);
2229 }
2230
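/*
 * PORT_CMD_PMP tells the HBA a port multiplier is attached so it
 * addresses the fan-out ports; BAD_PMP errors only matter while a
 * PMP is present, so the interrupt is enabled/disabled alongside it.
 */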
2231 static void ahci_pmp_attach(struct ata_port *ap)
2232 {
2233 void __iomem *port_mmio = ahci_port_base(ap);
2234 struct ahci_port_priv *pp = ap->private_data;
2235 u32 cmd;
2236
2237 cmd = readl(port_mmio + PORT_CMD);
2238 cmd |= PORT_CMD_PMP;
2239 writel(cmd, port_mmio + PORT_CMD);
2240
2241 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2242 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2243 }
2244
2245 static void ahci_pmp_detach(struct ata_port *ap)
2246 {
2247 void __iomem *port_mmio = ahci_port_base(ap);
2248 struct ahci_port_priv *pp = ap->private_data;
2249 u32 cmd;
2250
2251 cmd = readl(port_mmio + PORT_CMD);
2252 cmd &= ~PORT_CMD_PMP;
2253 writel(cmd, port_mmio + PORT_CMD);
2254
2255 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2256 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2257 }
2258
2259 static int ahci_port_resume(struct ata_port *ap)
2260 {
2261 ahci_power_up(ap);
2262 ahci_start_port(ap);
2263
2264 if (sata_pmp_attached(ap))
2265 ahci_pmp_attach(ap);
2266 else
2267 ahci_pmp_detach(ap);
2268
2269 return 0;
2270 }
2271
2272 #ifdef CONFIG_PM
2273 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2274 {
2275 const char *emsg = NULL;
2276 int rc;
2277
2278 rc = ahci_deinit_port(ap, &emsg);
2279 if (rc == 0)
2280 ahci_power_down(ap);
2281 else {
2282 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2283 ahci_start_port(ap);
2284 }
2285
2286 return rc;
2287 }
2288
2289 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2290 {
2291 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2292 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2293 u32 ctl;
2294
2295 if (mesg.event & PM_EVENT_SLEEP) {
2296 /* AHCI spec rev1.1 section 8.3.3:
2297 * Software must disable interrupts prior to requesting a
2298 * transition of the HBA to D3 state.
2299 */
2300 ctl = readl(mmio + HOST_CTL);
2301 ctl &= ~HOST_IRQ_EN;
2302 writel(ctl, mmio + HOST_CTL);
2303 readl(mmio + HOST_CTL); /* flush */
2304 }
2305
2306 return ata_pci_device_suspend(pdev, mesg);
2307 }
2308
2309 static int ahci_pci_device_resume(struct pci_dev *pdev)
2310 {
2311 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2312 int rc;
2313
2314 rc = ata_pci_device_do_resume(pdev);
2315 if (rc)
2316 return rc;
2317
2318 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2319 rc = ahci_reset_controller(host);
2320 if (rc)
2321 return rc;
2322
2323 ahci_init_controller(host);
2324 }
2325
2326 ata_host_resume(host);
2327
2328 return 0;
2329 }
2330 #endif
2331
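/*
 * Allocate the per-port DMA region (AHCI_PORT_PRIV_DMA_SZ bytes) and
 * carve it into the command list, the received-FIS area and the
 * per-slot command tables, then bring the port up.
 */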
2332 static int ahci_port_start(struct ata_port *ap)
2333 {
2334 struct device *dev = ap->host->dev;
2335 struct ahci_port_priv *pp;
2336 void *mem;
2337 dma_addr_t mem_dma;
2338
2339 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2340 if (!pp)
2341 return -ENOMEM;
2342
2343 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2344 GFP_KERNEL);
2345 if (!mem)
2346 return -ENOMEM;
2347 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2348
2349 /*
2350 * First item in the chunk of DMA memory: the 32-slot command
2351 * list (command headers), 32 bytes per slot.
2352 */
2353 pp->cmd_slot = mem;
2354 pp->cmd_slot_dma = mem_dma;
2355
2356 mem += AHCI_CMD_SLOT_SZ;
2357 mem_dma += AHCI_CMD_SLOT_SZ;
2358
2359 /*
2360 * Second item: Received-FIS area
2361 */
2362 pp->rx_fis = mem;
2363 pp->rx_fis_dma = mem_dma;
2364
2365 mem += AHCI_RX_FIS_SZ;
2366 mem_dma += AHCI_RX_FIS_SZ;
2367
2368 /*
2369 * Third item: command tables, one per command slot, each holding
2370 * a command FIS, the ATAPI command and the scatter-gather table
2371 */
2372 pp->cmd_tbl = mem;
2373 pp->cmd_tbl_dma = mem_dma;
2374
2375 /*
2376 * Save off the initial set of interrupts to be enabled.
2377 * This may be changed later.
2378 */
2379 pp->intr_mask = DEF_PORT_IRQ;
2380
2381 ap->private_data = pp;
2382
2383 /* engage engines, captain */
2384 return ahci_port_resume(ap);
2385 }
2386
2387 static void ahci_port_stop(struct ata_port *ap)
2388 {
2389 const char *emsg = NULL;
2390 int rc;
2391
2392 /* de-initialize port */
2393 rc = ahci_deinit_port(ap, &emsg);
2394 if (rc)
2395 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2396 }
2397
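/*
 * Set the DMA masks: try 64-bit addressing when the controller
 * advertises 64-bit support (using_dac), otherwise fall back to
 * 32-bit masks.
 */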
2398 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2399 {
2400 int rc;
2401
2402 if (using_dac &&
2403 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2404 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2405 if (rc) {
2406 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2407 if (rc) {
2408 dev_printk(KERN_ERR, &pdev->dev,
2409 "64-bit DMA enable failed\n");
2410 return rc;
2411 }
2412 }
2413 } else {
2414 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2415 if (rc) {
2416 dev_printk(KERN_ERR, &pdev->dev,
2417 "32-bit DMA enable failed\n");
2418 return rc;
2419 }
2420 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2421 if (rc) {
2422 dev_printk(KERN_ERR, &pdev->dev,
2423 "32-bit consistent DMA enable failed\n");
2424 return rc;
2425 }
2426 }
2427 return 0;
2428 }
2429
2430 static void ahci_print_info(struct ata_host *host)
2431 {
2432 struct ahci_host_priv *hpriv = host->private_data;
2433 struct pci_dev *pdev = to_pci_dev(host->dev);
2434 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2435 u32 vers, cap, impl, speed;
2436 const char *speed_s;
2437 u16 cc;
2438 const char *scc_s;
2439
2440 vers = readl(mmio + HOST_VERSION);
2441 cap = hpriv->cap;
2442 impl = hpriv->port_map;
2443
2444 speed = (cap >> 20) & 0xf;
2445 if (speed == 1)
2446 speed_s = "1.5";
2447 else if (speed == 2)
2448 speed_s = "3";
2449 else
2450 speed_s = "?";
2451
2452 pci_read_config_word(pdev, 0x0a, &cc);
2453 if (cc == PCI_CLASS_STORAGE_IDE)
2454 scc_s = "IDE";
2455 else if (cc == PCI_CLASS_STORAGE_SATA)
2456 scc_s = "SATA";
2457 else if (cc == PCI_CLASS_STORAGE_RAID)
2458 scc_s = "RAID";
2459 else
2460 scc_s = "unknown";
2461
2462 dev_printk(KERN_INFO, &pdev->dev,
2463 "AHCI %02x%02x.%02x%02x "
2464 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2465 ,
2466
2467 (vers >> 24) & 0xff,
2468 (vers >> 16) & 0xff,
2469 (vers >> 8) & 0xff,
2470 vers & 0xff,
2471
2472 ((cap >> 8) & 0x1f) + 1,
2473 (cap & 0x1f) + 1,
2474 speed_s,
2475 impl,
2476 scc_s);
2477
2478 dev_printk(KERN_INFO, &pdev->dev,
2479 "flags: "
2480 "%s%s%s%s%s%s%s"
2481 "%s%s%s%s%s%s%s"
2482 "%s\n"
2483 ,
2484
2485 cap & (1 << 31) ? "64bit " : "",
2486 cap & (1 << 30) ? "ncq " : "",
2487 cap & (1 << 29) ? "sntf " : "",
2488 cap & (1 << 28) ? "ilck " : "",
2489 cap & (1 << 27) ? "stag " : "",
2490 cap & (1 << 26) ? "pm " : "",
2491 cap & (1 << 25) ? "led " : "",
2492
2493 cap & (1 << 24) ? "clo " : "",
2494 cap & (1 << 19) ? "nz " : "",
2495 cap & (1 << 18) ? "only " : "",
2496 cap & (1 << 17) ? "pmp " : "",
2497 cap & (1 << 15) ? "pio " : "",
2498 cap & (1 << 14) ? "slum " : "",
2499 cap & (1 << 13) ? "part " : "",
2500 cap & (1 << 6) ? "ems " : ""
2501 );
2502 }
2503
2504 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2505 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2506 * support PMP and the 4726 either directly exports the device
2507 * attached to the first downstream port or acts as a hardware storage
2508 * controller and emulates a single ATA device (can be RAID 0/1 or some
2509 * other configuration).
2510 *
2511 * When there's no device attached to the first downstream port of the
2512 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2513 * configure the 4726. However, ATA emulation of the device is very
2514 * lame. It doesn't send a signature D2H Reg FIS after the initial
2515 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2516 *
2517 * The following function works around the problem by always using
2518 * hardreset on the port and not depending on receiving signature FIS
2519 * afterward. If a signature FIS isn't received soon, ATA class is
2520 * assumed without follow-up softreset.
2521 */
2522 static void ahci_p5wdh_workaround(struct ata_host *host)
2523 {
2524 static struct dmi_system_id sysids[] = {
2525 {
2526 .ident = "P5W DH Deluxe",
2527 .matches = {
2528 DMI_MATCH(DMI_SYS_VENDOR,
2529 "ASUSTEK COMPUTER INC"),
2530 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2531 },
2532 },
2533 { }
2534 };
2535 struct pci_dev *pdev = to_pci_dev(host->dev);
2536
2537 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2538 dmi_check_system(sysids)) {
2539 struct ata_port *ap = host->ports[1];
2540
2541 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2542 "Deluxe on-board SIMG4726 workaround\n");
2543
2544 ap->ops = &ahci_p5wdh_ops;
2545 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2546 }
2547 }
2548
2549 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2550 {
2551 static int printed_version;
2552 unsigned int board_id = ent->driver_data;
2553 struct ata_port_info pi = ahci_port_info[board_id];
2554 const struct ata_port_info *ppi[] = { &pi, NULL };
2555 struct device *dev = &pdev->dev;
2556 struct ahci_host_priv *hpriv;
2557 struct ata_host *host;
2558 int n_ports, i, rc;
2559
2560 VPRINTK("ENTER\n");
2561
2562 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2563
2564 if (!printed_version++)
2565 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2566
2567 /* The AHCI driver can only drive the SATA ports; the PATA driver
2568 can drive them all, so if both drivers are selected make sure
2569 AHCI stays out of the way */
2570 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2571 return -ENODEV;
2572
2573 /* acquire resources */
2574 rc = pcim_enable_device(pdev);
2575 if (rc)
2576 return rc;
2577
2578 /* AHCI controllers often implement an SFF-compatible interface.
2579 * Grab all PCI BARs just in case.
2580 */
2581 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2582 if (rc == -EBUSY)
2583 pcim_pin_device(pdev);
2584 if (rc)
2585 return rc;
2586
2587 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2588 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2589 u8 map;
2590
2591 /* ICH6s share the same PCI ID for both piix and ahci
2592 * modes. Enabling ahci mode while MAP indicates
2593 * combined mode is a bad idea. Yield to ata_piix.
2594 */
2595 pci_read_config_byte(pdev, ICH_MAP, &map);
2596 if (map & 0x3) {
2597 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2598 "combined mode, can't enable AHCI mode\n");
2599 return -ENODEV;
2600 }
2601 }
2602
2603 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2604 if (!hpriv)
2605 return -ENOMEM;
2606 hpriv->flags |= (unsigned long)pi.private_data;
2607
2608 /* MCP65 revision A1 and A2 can't do MSI */
2609 if (board_id == board_ahci_mcp65 &&
2610 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2611 hpriv->flags |= AHCI_HFLAG_NO_MSI;
2612
2613 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
2614 pci_intx(pdev, 1);
2615
2616 /* save initial config */
2617 ahci_save_initial_config(pdev, hpriv);
2618
2619 /* prepare host */
2620 if (hpriv->cap & HOST_CAP_NCQ)
2621 pi.flags |= ATA_FLAG_NCQ;
2622
2623 if (hpriv->cap & HOST_CAP_PMP)
2624 pi.flags |= ATA_FLAG_PMP;
2625
2626 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2627 u8 messages;
2628 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2629 u32 em_loc = readl(mmio + HOST_EM_LOC);
2630 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2631
2632 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2633
2634 /* we only support LED message type right now */
2635 if ((messages & 0x01) && (ahci_em_messages == 1)) {
2636 /* store em_loc: HOST_EM_LOC's upper half is the buffer offset in DWORDs; convert it to bytes */
2637 hpriv->em_loc = ((em_loc >> 16) * 4);
2638 pi.flags |= ATA_FLAG_EM;
2639 if (!(em_ctl & EM_CTL_ALHD))
2640 pi.flags |= ATA_FLAG_SW_ACTIVITY;
2641 }
2642 }
2643
2644 /* CAP.NP sometimes indicates the index of the last enabled
2645 * port, at other times, that of the last possible port, so
2646 * determining the maximum port number requires looking at
2647 * both CAP.NP and port_map.
2648 */
2649 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2650
2651 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2652 if (!host)
2653 return -ENOMEM;
2654 host->iomap = pcim_iomap_table(pdev);
2655 host->private_data = hpriv;
2656
2657 if (pi.flags & ATA_FLAG_EM)
2658 ahci_reset_em(host);
2659
2660 for (i = 0; i < host->n_ports; i++) {
2661 struct ata_port *ap = host->ports[i];
2662
2663 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
2664 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
2665 0x100 + ap->port_no * 0x80, "port");
2666
2667 /* set initial link pm policy */
2668 ap->pm_policy = NOT_AVAILABLE;
2669
2670 /* set enclosure management message type */
2671 if (ap->flags & ATA_FLAG_EM)
2672 ap->em_message_type = ahci_em_messages;
2673
2674
2675 /* disabled/not-implemented port */
2676 if (!(hpriv->port_map & (1 << i)))
2677 ap->ops = &ata_dummy_port_ops;
2678 }
2679
2680 /* apply workaround for ASUS P5W DH Deluxe mainboard */
2681 ahci_p5wdh_workaround(host);
2682
2683 /* initialize adapter */
2684 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
2685 if (rc)
2686 return rc;
2687
2688 rc = ahci_reset_controller(host);
2689 if (rc)
2690 return rc;
2691
2692 ahci_init_controller(host);
2693 ahci_print_info(host);
2694
2695 pci_set_master(pdev);
2696 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
2697 &ahci_sht);
2698 }
2699
2700 static int __init ahci_init(void)
2701 {
2702 return pci_register_driver(&ahci_pci_driver);
2703 }
2704
2705 static void __exit ahci_exit(void)
2706 {
2707 pci_unregister_driver(&ahci_pci_driver);
2708 }
2709
2710
2711 MODULE_AUTHOR("Jeff Garzik");
2712 MODULE_DESCRIPTION("AHCI SATA low-level driver");
2713 MODULE_LICENSE("GPL");
2714 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
2715 MODULE_VERSION(DRV_VERSION);
2716
2717 module_init(ahci_init);
2718 module_exit(ahci_exit);