1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <linux/gfp.h>
46 #include <scsi/scsi_host.h>
47 #include <scsi/scsi_cmnd.h>
48 #include <linux/libata.h>
49
50 #define DRV_NAME "ahci"
51 #define DRV_VERSION "3.0"
52
53 /* Enclosure Management Control */
54 #define EM_CTRL_MSG_TYPE 0x000f0000
55
56 /* Enclosure Management LED Message Type */
57 #define EM_MSG_LED_HBA_PORT 0x0000000f
58 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
59 #define EM_MSG_LED_VALUE 0xffff0000
60 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
61 #define EM_MSG_LED_VALUE_OFF 0xfff80000
62 #define EM_MSG_LED_VALUE_ON 0x00010000
63
64 static int ahci_skip_host_reset;
65 static int ahci_ignore_sss;
66
67 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
68 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
69
70 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
71 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
72
73 static int ahci_enable_alpm(struct ata_port *ap,
74 enum link_pm policy);
75 static void ahci_disable_alpm(struct ata_port *ap);
76 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
77 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
78 size_t size);
79 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
80 ssize_t size);
81
82 enum {
83 AHCI_PCI_BAR = 5,
84 AHCI_MAX_PORTS = 32,
85 AHCI_MAX_SG = 168, /* hardware max is 64K */
86 AHCI_DMA_BOUNDARY = 0xffffffff,
87 AHCI_MAX_CMDS = 32,
88 AHCI_CMD_SZ = 32,
89 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
90 AHCI_RX_FIS_SZ = 256,
91 AHCI_CMD_TBL_CDB = 0x40,
92 AHCI_CMD_TBL_HDR_SZ = 0x80,
93 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
94 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
95 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
96 AHCI_RX_FIS_SZ,
97 AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
98 AHCI_CMD_TBL_AR_SZ +
99 (AHCI_RX_FIS_SZ * 16),
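/*
 * Size of the per-port DMA scratch area implied by the constants above:
 *
 *   command list:   AHCI_MAX_CMDS * AHCI_CMD_SZ     = 32 * 32           =  1024 bytes
 *   command tables: AHCI_MAX_CMDS * AHCI_CMD_TBL_SZ = 32 * (128 + 2688) = 90112 bytes
 *   received FIS:   AHCI_RX_FIS_SZ                                      =   256 bytes
 *
 * so AHCI_PORT_PRIV_DMA_SZ comes to 91392 bytes.  The FBS variant keeps
 * 16 received-FIS areas (one per port multiplier device), 95232 bytes.
 */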
100 AHCI_IRQ_ON_SG = (1 << 31),
101 AHCI_CMD_ATAPI = (1 << 5),
102 AHCI_CMD_WRITE = (1 << 6),
103 AHCI_CMD_PREFETCH = (1 << 7),
104 AHCI_CMD_RESET = (1 << 8),
105 AHCI_CMD_CLR_BUSY = (1 << 10),
106
107 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
108 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
109 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
110
111 board_ahci = 0,
112 board_ahci_vt8251 = 1,
113 board_ahci_ign_iferr = 2,
114 board_ahci_sb600 = 3,
115 board_ahci_mv = 4,
116 board_ahci_sb700 = 5, /* for SB700 and SB800 */
117 board_ahci_mcp65 = 6,
118 board_ahci_nopmp = 7,
119 board_ahci_yesncq = 8,
120 board_ahci_nosntf = 9,
121
122 /* global controller registers */
123 HOST_CAP = 0x00, /* host capabilities */
124 HOST_CTL = 0x04, /* global host control */
125 HOST_IRQ_STAT = 0x08, /* interrupt status */
126 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
127 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
128 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
129 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
130 HOST_CAP2 = 0x24, /* host capabilities, extended */
131
132 /* HOST_CTL bits */
133 HOST_RESET = (1 << 0), /* reset controller; self-clear */
134 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
135 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
136
137 /* HOST_CAP bits */
138 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
139 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
140 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
141 HOST_CAP_PART = (1 << 13), /* Partial state capable */
142 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
143 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
144 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
145 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
146 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
147 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
148 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
149 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
150 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
151 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
152 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
153 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
154 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
155
156 /* HOST_CAP2 bits */
157 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
158 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
159 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
160
161 /* registers for each SATA port */
162 PORT_LST_ADDR = 0x00, /* command list DMA addr */
163 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
164 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
165 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
166 PORT_IRQ_STAT = 0x10, /* interrupt status */
167 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
168 PORT_CMD = 0x18, /* port command */
169 PORT_TFDATA = 0x20, /* taskfile data */
170 PORT_SIG = 0x24, /* device TF signature */
171 PORT_CMD_ISSUE = 0x38, /* command issue */
172 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
173 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
174 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
175 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
176 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
177 PORT_FBS = 0x40, /* FIS-based Switching */
178
179 /* PORT_IRQ_{STAT,MASK} bits */
180 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
181 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
182 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
183 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
184 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
185 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
186 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
187 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
188
189 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
190 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
191 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
192 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
193 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
194 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
195 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
196 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
197 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
198
199 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
200 PORT_IRQ_IF_ERR |
201 PORT_IRQ_CONNECT |
202 PORT_IRQ_PHYRDY |
203 PORT_IRQ_UNK_FIS |
204 PORT_IRQ_BAD_PMP,
205 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
206 PORT_IRQ_TF_ERR |
207 PORT_IRQ_HBUS_DATA_ERR,
208 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
209 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
210 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
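/*
 * PORT_IRQ_FREEZE collects the conditions that freeze the port for
 * error handling, PORT_IRQ_ERROR adds the per-command error bits, and
 * DEF_PORT_IRQ is the set the driver normally enables in
 * PORT_IRQ_MASK: all error conditions plus the FIS-received and
 * descriptor-done events used for normal command completion.
 */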
211
212 /* PORT_CMD bits */
213 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
214 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
215 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
216 PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
217 PORT_CMD_PMP = (1 << 17), /* PMP attached */
218 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
219 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
220 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
221 PORT_CMD_CLO = (1 << 3), /* Command list override */
222 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
223 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
224 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
225
226 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
227 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
228 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
229 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
230
231 PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
232 PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
233 PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
234 PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
235 PORT_FBS_SDE = (1 << 2), /* FBS single device error */
236 PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
237 PORT_FBS_EN = (1 << 0), /* Enable FBS */
238
239 /* hpriv->flags bits */
240 AHCI_HFLAG_NO_NCQ = (1 << 0),
241 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
242 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
243 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
244 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
245 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
246 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
247 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
248 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
249 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
250 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
251 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
252 link offline */
253 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
254
255 /* ap->flags bits */
256
257 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
258 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
259 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
260 ATA_FLAG_IPM,
261
262 ICH_MAP = 0x90, /* ICH MAP register */
263
264 /* em constants */
265 EM_MAX_SLOTS = 8,
266 EM_MAX_RETRY = 5,
267
268 /* em_ctl bits */
269 EM_CTL_RST = (1 << 9), /* Reset */
270 EM_CTL_TM = (1 << 8), /* Transmit Message */
271 EM_CTL_ALHD = (1 << 26), /* Activity LED */
272 };
273
274 struct ahci_cmd_hdr {
275 __le32 opts;
276 __le32 status;
277 __le32 tbl_addr;
278 __le32 tbl_addr_hi;
279 __le32 reserved[4];
280 };
281
282 struct ahci_sg {
283 __le32 addr;
284 __le32 addr_hi;
285 __le32 reserved;
286 __le32 flags_size;
287 };
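/*
 * Both structures above mirror hardware-defined formats from the AHCI
 * spec: ahci_cmd_hdr is one 32-byte entry in the per-port command list
 * (matching AHCI_CMD_SZ), and ahci_sg is one 16-byte PRD entry in a
 * command table's scatter/gather list.  flags_size carries the region
 * byte count minus one in its low bits and AHCI_IRQ_ON_SG in bit 31,
 * and all fields are little-endian as seen by the HBA.
 */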
288
289 struct ahci_em_priv {
290 enum sw_activity blink_policy;
291 struct timer_list timer;
292 unsigned long saved_activity;
293 unsigned long activity;
294 unsigned long led_state;
295 };
296
297 struct ahci_host_priv {
298 unsigned int flags; /* AHCI_HFLAG_* */
299 u32 cap; /* cap to use */
300 u32 cap2; /* cap2 to use */
301 u32 port_map; /* port map to use */
302 u32 saved_cap; /* saved initial cap */
303 u32 saved_cap2; /* saved initial cap2 */
304 u32 saved_port_map; /* saved initial port_map */
305 u32 em_loc; /* enclosure management location */
306 };
307
308 struct ahci_port_priv {
309 struct ata_link *active_link;
310 struct ahci_cmd_hdr *cmd_slot;
311 dma_addr_t cmd_slot_dma;
312 void *cmd_tbl;
313 dma_addr_t cmd_tbl_dma;
314 void *rx_fis;
315 dma_addr_t rx_fis_dma;
316 /* for NCQ spurious interrupt analysis */
317 unsigned int ncq_saw_d2h:1;
318 unsigned int ncq_saw_dmas:1;
319 unsigned int ncq_saw_sdb:1;
320 u32 intr_mask; /* interrupts to enable */
321 bool fbs_supported; /* set iff FBS is supported */
322 bool fbs_enabled; /* set iff FBS is enabled */
323 int fbs_last_dev; /* save FBS.DEV of last FIS */
324 /* enclosure management info per PM slot */
325 struct ahci_em_priv em_priv[EM_MAX_SLOTS];
326 };
327
328 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
329 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
330 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
331 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
332 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
333 static int ahci_port_start(struct ata_port *ap);
334 static void ahci_port_stop(struct ata_port *ap);
335 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
336 static void ahci_qc_prep(struct ata_queued_cmd *qc);
337 static void ahci_freeze(struct ata_port *ap);
338 static void ahci_thaw(struct ata_port *ap);
339 static void ahci_enable_fbs(struct ata_port *ap);
340 static void ahci_disable_fbs(struct ata_port *ap);
341 static void ahci_pmp_attach(struct ata_port *ap);
342 static void ahci_pmp_detach(struct ata_port *ap);
343 static int ahci_softreset(struct ata_link *link, unsigned int *class,
344 unsigned long deadline);
345 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
346 unsigned long deadline);
347 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
348 unsigned long deadline);
349 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
350 unsigned long deadline);
351 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
352 unsigned long deadline);
353 static void ahci_postreset(struct ata_link *link, unsigned int *class);
354 static void ahci_error_handler(struct ata_port *ap);
355 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
356 static int ahci_port_resume(struct ata_port *ap);
357 static void ahci_dev_config(struct ata_device *dev);
358 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
359 u32 opts);
360 #ifdef CONFIG_PM
361 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
362 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
363 static int ahci_pci_device_resume(struct pci_dev *pdev);
364 #endif
365 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
366 static ssize_t ahci_activity_store(struct ata_device *dev,
367 enum sw_activity val);
368 static void ahci_init_sw_activity(struct ata_link *link);
369
370 static ssize_t ahci_show_host_caps(struct device *dev,
371 struct device_attribute *attr, char *buf);
372 static ssize_t ahci_show_host_cap2(struct device *dev,
373 struct device_attribute *attr, char *buf);
374 static ssize_t ahci_show_host_version(struct device *dev,
375 struct device_attribute *attr, char *buf);
376 static ssize_t ahci_show_port_cmd(struct device *dev,
377 struct device_attribute *attr, char *buf);
378
379 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
380 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
381 static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
382 static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
383
384 static struct device_attribute *ahci_shost_attrs[] = {
385 &dev_attr_link_power_management_policy,
386 &dev_attr_em_message_type,
387 &dev_attr_em_message,
388 &dev_attr_ahci_host_caps,
389 &dev_attr_ahci_host_cap2,
390 &dev_attr_ahci_host_version,
391 &dev_attr_ahci_port_cmd,
392 NULL
393 };
394
395 static struct device_attribute *ahci_sdev_attrs[] = {
396 &dev_attr_sw_activity,
397 &dev_attr_unload_heads,
398 NULL
399 };
400
401 static struct scsi_host_template ahci_sht = {
402 ATA_NCQ_SHT(DRV_NAME),
403 .can_queue = AHCI_MAX_CMDS - 1,
404 .sg_tablesize = AHCI_MAX_SG,
405 .dma_boundary = AHCI_DMA_BOUNDARY,
406 .shost_attrs = ahci_shost_attrs,
407 .sdev_attrs = ahci_sdev_attrs,
408 };
409
410 static struct ata_port_operations ahci_ops = {
411 .inherits = &sata_pmp_port_ops,
412
413 .qc_defer = ahci_pmp_qc_defer,
414 .qc_prep = ahci_qc_prep,
415 .qc_issue = ahci_qc_issue,
416 .qc_fill_rtf = ahci_qc_fill_rtf,
417
418 .freeze = ahci_freeze,
419 .thaw = ahci_thaw,
420 .softreset = ahci_softreset,
421 .hardreset = ahci_hardreset,
422 .postreset = ahci_postreset,
423 .pmp_softreset = ahci_softreset,
424 .error_handler = ahci_error_handler,
425 .post_internal_cmd = ahci_post_internal_cmd,
426 .dev_config = ahci_dev_config,
427
428 .scr_read = ahci_scr_read,
429 .scr_write = ahci_scr_write,
430 .pmp_attach = ahci_pmp_attach,
431 .pmp_detach = ahci_pmp_detach,
432
433 .enable_pm = ahci_enable_alpm,
434 .disable_pm = ahci_disable_alpm,
435 .em_show = ahci_led_show,
436 .em_store = ahci_led_store,
437 .sw_activity_show = ahci_activity_show,
438 .sw_activity_store = ahci_activity_store,
439 #ifdef CONFIG_PM
440 .port_suspend = ahci_port_suspend,
441 .port_resume = ahci_port_resume,
442 #endif
443 .port_start = ahci_port_start,
444 .port_stop = ahci_port_stop,
445 };
446
447 static struct ata_port_operations ahci_vt8251_ops = {
448 .inherits = &ahci_ops,
449 .hardreset = ahci_vt8251_hardreset,
450 };
451
452 static struct ata_port_operations ahci_p5wdh_ops = {
453 .inherits = &ahci_ops,
454 .hardreset = ahci_p5wdh_hardreset,
455 };
456
457 static struct ata_port_operations ahci_sb600_ops = {
458 .inherits = &ahci_ops,
459 .softreset = ahci_sb600_softreset,
460 .pmp_softreset = ahci_sb600_softreset,
461 };
462
463 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
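/*
 * AHCI_HFLAGS() smuggles the board-specific AHCI_HFLAG_* bits through
 * the otherwise unused ata_port_info.private_data pointer; the probe
 * code copies them into hpriv->flags before they are consulted.
 */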
464
465 static const struct ata_port_info ahci_port_info[] = {
466 [board_ahci] =
467 {
468 .flags = AHCI_FLAG_COMMON,
469 .pio_mask = ATA_PIO4,
470 .udma_mask = ATA_UDMA6,
471 .port_ops = &ahci_ops,
472 },
473 [board_ahci_vt8251] =
474 {
475 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
476 .flags = AHCI_FLAG_COMMON,
477 .pio_mask = ATA_PIO4,
478 .udma_mask = ATA_UDMA6,
479 .port_ops = &ahci_vt8251_ops,
480 },
481 [board_ahci_ign_iferr] =
482 {
483 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
484 .flags = AHCI_FLAG_COMMON,
485 .pio_mask = ATA_PIO4,
486 .udma_mask = ATA_UDMA6,
487 .port_ops = &ahci_ops,
488 },
489 [board_ahci_sb600] =
490 {
491 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
492 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
493 AHCI_HFLAG_32BIT_ONLY),
494 .flags = AHCI_FLAG_COMMON,
495 .pio_mask = ATA_PIO4,
496 .udma_mask = ATA_UDMA6,
497 .port_ops = &ahci_sb600_ops,
498 },
499 [board_ahci_mv] =
500 {
501 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
502 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
503 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
504 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
505 .pio_mask = ATA_PIO4,
506 .udma_mask = ATA_UDMA6,
507 .port_ops = &ahci_ops,
508 },
509 [board_ahci_sb700] = /* for SB700 and SB800 */
510 {
511 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
512 .flags = AHCI_FLAG_COMMON,
513 .pio_mask = ATA_PIO4,
514 .udma_mask = ATA_UDMA6,
515 .port_ops = &ahci_sb600_ops,
516 },
517 [board_ahci_mcp65] =
518 {
519 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
520 .flags = AHCI_FLAG_COMMON,
521 .pio_mask = ATA_PIO4,
522 .udma_mask = ATA_UDMA6,
523 .port_ops = &ahci_ops,
524 },
525 [board_ahci_nopmp] =
526 {
527 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
528 .flags = AHCI_FLAG_COMMON,
529 .pio_mask = ATA_PIO4,
530 .udma_mask = ATA_UDMA6,
531 .port_ops = &ahci_ops,
532 },
533 [board_ahci_yesncq] =
534 {
535 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
536 .flags = AHCI_FLAG_COMMON,
537 .pio_mask = ATA_PIO4,
538 .udma_mask = ATA_UDMA6,
539 .port_ops = &ahci_ops,
540 },
541 [board_ahci_nosntf] =
542 {
543 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
544 .flags = AHCI_FLAG_COMMON,
545 .pio_mask = ATA_PIO4,
546 .udma_mask = ATA_UDMA6,
547 .port_ops = &ahci_ops,
548 },
549 };
550
551 static const struct pci_device_id ahci_pci_tbl[] = {
552 /* Intel */
553 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
554 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
555 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
556 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
557 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
558 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
559 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
560 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
561 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
562 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
563 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
564 { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
565 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
566 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
567 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
568 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
569 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
570 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
571 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
572 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
573 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
574 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
575 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
576 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
577 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
578 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
579 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
580 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
581 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
582 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
583 { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
584 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
585 { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
586 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
587 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
588 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
589 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
590 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
591 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
592 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
593 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
594 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
595 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
596 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
597 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
598 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
599
600 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
601 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
602 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
603
604 /* ATI */
605 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
606 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
607 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
608 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
609 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
610 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
611 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
612
613 /* AMD */
614 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
615 /* AMD uses the RAID class only for AHCI controllers */
616 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
617 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
618
619 /* VIA */
620 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
621 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
622
623 /* NVIDIA */
624 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
625 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
626 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
627 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
628 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
629 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
630 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
631 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
632 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
633 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
634 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
635 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
636 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
637 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
638 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
639 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
640 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
641 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
642 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
643 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
644 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
645 { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */
646 { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */
647 { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */
648 { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */
649 { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */
650 { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */
651 { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */
652 { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */
653 { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */
654 { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */
655 { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */
656 { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */
657 { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */
658 { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */
659 { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */
660 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
661 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
662 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
663 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
664 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
665 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
666 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
667 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
668 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
669 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
670 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
671 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
672 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
673 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
674 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
675 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
676 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
677 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
678 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
679 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
680 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
681 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
682 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
683 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
684 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
685 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
686 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
687 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
688 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
689 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
690 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
691 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
692 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
693 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
694 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
695 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
696 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
697 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
698 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
699 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
700 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
701 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
702 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
703 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
704 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
705 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
706 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
707 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
708
709 /* SiS */
710 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
711 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
712 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
713
714 /* Marvell */
715 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
716 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
717
718 /* Promise */
719 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
720
721 /* Generic, PCI class code for AHCI */
722 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
723 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
724
725 { } /* terminate list */
726 };
727
728
729 static struct pci_driver ahci_pci_driver = {
730 .name = DRV_NAME,
731 .id_table = ahci_pci_tbl,
732 .probe = ahci_init_one,
733 .remove = ata_pci_remove_one,
734 #ifdef CONFIG_PM
735 .suspend = ahci_pci_device_suspend,
736 .resume = ahci_pci_device_resume,
737 #endif
738 };
739
740 static int ahci_em_messages = 1;
741 module_param(ahci_em_messages, int, 0444);
742 /* add other LED protocol types when they become supported */
743 MODULE_PARM_DESC(ahci_em_messages,
744 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
745
746 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
747 static int marvell_enable;
748 #else
749 static int marvell_enable = 1;
750 #endif
751 module_param(marvell_enable, int, 0644);
752 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
753
754
755 static inline int ahci_nr_ports(u32 cap)
756 {
757 return (cap & 0x1f) + 1;
758 }
759
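/*
 * AHCI places its global registers in the first 0x100 bytes of the
 * memory BAR and then one 0x80-byte register block per port, so
 *
 *     port_mmio = ABAR + 0x100 + port_no * 0x80
 *
 * e.g. port 2 starts at offset 0x200.  The helpers below compute this
 * from the host's iomap of AHCI_PCI_BAR.
 */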
760 static inline void __iomem *__ahci_port_base(struct ata_host *host,
761 unsigned int port_no)
762 {
763 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
764
765 return mmio + 0x100 + (port_no * 0x80);
766 }
767
768 static inline void __iomem *ahci_port_base(struct ata_port *ap)
769 {
770 return __ahci_port_base(ap->host, ap->port_no);
771 }
772
773 static void ahci_enable_ahci(void __iomem *mmio)
774 {
775 int i;
776 u32 tmp;
777
778 /* turn on AHCI_EN */
779 tmp = readl(mmio + HOST_CTL);
780 if (tmp & HOST_AHCI_EN)
781 return;
782
783 /* Some controllers need AHCI_EN to be written multiple times.
784 * Try a few times before giving up.
785 */
786 for (i = 0; i < 5; i++) {
787 tmp |= HOST_AHCI_EN;
788 writel(tmp, mmio + HOST_CTL);
789 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
790 if (tmp & HOST_AHCI_EN)
791 return;
792 msleep(10);
793 }
794
795 WARN_ON(1);
796 }
797
798 static ssize_t ahci_show_host_caps(struct device *dev,
799 struct device_attribute *attr, char *buf)
800 {
801 struct Scsi_Host *shost = class_to_shost(dev);
802 struct ata_port *ap = ata_shost_to_port(shost);
803 struct ahci_host_priv *hpriv = ap->host->private_data;
804
805 return sprintf(buf, "%x\n", hpriv->cap);
806 }
807
808 static ssize_t ahci_show_host_cap2(struct device *dev,
809 struct device_attribute *attr, char *buf)
810 {
811 struct Scsi_Host *shost = class_to_shost(dev);
812 struct ata_port *ap = ata_shost_to_port(shost);
813 struct ahci_host_priv *hpriv = ap->host->private_data;
814
815 return sprintf(buf, "%x\n", hpriv->cap2);
816 }
817
818 static ssize_t ahci_show_host_version(struct device *dev,
819 struct device_attribute *attr, char *buf)
820 {
821 struct Scsi_Host *shost = class_to_shost(dev);
822 struct ata_port *ap = ata_shost_to_port(shost);
823 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
824
825 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
826 }
827
828 static ssize_t ahci_show_port_cmd(struct device *dev,
829 struct device_attribute *attr, char *buf)
830 {
831 struct Scsi_Host *shost = class_to_shost(dev);
832 struct ata_port *ap = ata_shost_to_port(shost);
833 void __iomem *port_mmio = ahci_port_base(ap);
834
835 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
836 }
837
838 /**
839 * ahci_save_initial_config - Save and fixup initial config values
840 * @pdev: target PCI device
841 * @hpriv: host private area to store config values
842 *
843 * Some registers containing configuration info might be setup by
844 * BIOS and might be cleared on reset. This function saves the
845 * initial values of those registers into @hpriv such that they
846 * can be restored after controller reset.
847 *
848 * If inconsistent, config values are fixed up by this function.
849 *
850 * LOCKING:
851 * None.
852 */
853 static void ahci_save_initial_config(struct pci_dev *pdev,
854 struct ahci_host_priv *hpriv)
855 {
856 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
857 u32 cap, cap2, vers, port_map;
858 int i;
859 int mv;
860
861 /* make sure AHCI mode is enabled before accessing CAP */
862 ahci_enable_ahci(mmio);
863
864 /* Values prefixed with saved_ are written back to host after
865 * reset. Values without are used for driver operation.
866 */
867 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
868 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
869
870 /* CAP2 register is only defined for AHCI 1.2 and later */
871 vers = readl(mmio + HOST_VERSION);
872 if ((vers >> 16) > 1 ||
873 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
874 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
875 else
876 hpriv->saved_cap2 = cap2 = 0;
877
878 /* some chips have errata preventing 64bit use */
879 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
880 dev_printk(KERN_INFO, &pdev->dev,
881 "controller can't do 64bit DMA, forcing 32bit\n");
882 cap &= ~HOST_CAP_64;
883 }
884
885 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
886 dev_printk(KERN_INFO, &pdev->dev,
887 "controller can't do NCQ, turning off CAP_NCQ\n");
888 cap &= ~HOST_CAP_NCQ;
889 }
890
891 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
892 dev_printk(KERN_INFO, &pdev->dev,
893 "controller can do NCQ, turning on CAP_NCQ\n");
894 cap |= HOST_CAP_NCQ;
895 }
896
897 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
898 dev_printk(KERN_INFO, &pdev->dev,
899 "controller can't do PMP, turning off CAP_PMP\n");
900 cap &= ~HOST_CAP_PMP;
901 }
902
903 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
904 dev_printk(KERN_INFO, &pdev->dev,
905 "controller can't do SNTF, turning off CAP_SNTF\n");
906 cap &= ~HOST_CAP_SNTF;
907 }
908
909 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
910 port_map != 1) {
911 dev_printk(KERN_INFO, &pdev->dev,
912 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
913 port_map, 1);
914 port_map = 1;
915 }
916
917 /*
918 * Temporary Marvell 6145 hack: PATA port presence
919 * is asserted through the standard AHCI port
920 * presence register, as bit 4 (counting from 0)
921 */
922 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
923 if (pdev->device == 0x6121)
924 mv = 0x3;
925 else
926 mv = 0xf;
927 dev_printk(KERN_ERR, &pdev->dev,
928 "MV_AHCI HACK: port_map %x -> %x\n",
929 port_map,
930 port_map & mv);
931 dev_printk(KERN_ERR, &pdev->dev,
932 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
933
934 port_map &= mv;
935 }
936
937 /* cross check port_map and cap.n_ports */
938 if (port_map) {
939 int map_ports = 0;
940
941 for (i = 0; i < AHCI_MAX_PORTS; i++)
942 if (port_map & (1 << i))
943 map_ports++;
944
945 /* If PI has more ports than n_ports, whine, clear
946 * port_map and let it be generated from n_ports.
947 */
948 if (map_ports > ahci_nr_ports(cap)) {
949 dev_printk(KERN_WARNING, &pdev->dev,
950 "implemented port map (0x%x) contains more "
951 "ports than nr_ports (%u), using nr_ports\n",
952 port_map, ahci_nr_ports(cap));
953 port_map = 0;
954 }
955 }
956
957 /* fabricate port_map from cap.nr_ports */
958 if (!port_map) {
959 port_map = (1 << ahci_nr_ports(cap)) - 1;
960 dev_printk(KERN_WARNING, &pdev->dev,
961 "forcing PORTS_IMPL to 0x%x\n", port_map);
962
963 /* write the fixed up value to the PI register */
964 hpriv->saved_port_map = port_map;
965 }
966
967 /* record values to use during operation */
968 hpriv->cap = cap;
969 hpriv->cap2 = cap2;
970 hpriv->port_map = port_map;
971 }
972
973 /**
974 * ahci_restore_initial_config - Restore initial config
975 * @host: target ATA host
976 *
977 * Restore initial config stored by ahci_save_initial_config().
978 *
979 * LOCKING:
980 * None.
981 */
982 static void ahci_restore_initial_config(struct ata_host *host)
983 {
984 struct ahci_host_priv *hpriv = host->private_data;
985 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
986
987 writel(hpriv->saved_cap, mmio + HOST_CAP);
988 if (hpriv->saved_cap2)
989 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
990 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
991 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
992 }
993
994 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
995 {
996 static const int offset[] = {
997 [SCR_STATUS] = PORT_SCR_STAT,
998 [SCR_CONTROL] = PORT_SCR_CTL,
999 [SCR_ERROR] = PORT_SCR_ERR,
1000 [SCR_ACTIVE] = PORT_SCR_ACT,
1001 [SCR_NOTIFICATION] = PORT_SCR_NTF,
1002 };
1003 struct ahci_host_priv *hpriv = ap->host->private_data;
1004
1005 if (sc_reg < ARRAY_SIZE(offset) &&
1006 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
1007 return offset[sc_reg];
1008 return 0;
1009 }
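/*
 * The SCR registers map onto the fixed per-port PORT_SCR_* offsets
 * above; SCR_NOTIFICATION is only usable when CAP.SNTF is set.  A
 * return of 0 means "not supported", which the read/write helpers
 * below turn into -EINVAL (no SCR register lives at port offset 0).
 */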
1010
1011 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1012 {
1013 void __iomem *port_mmio = ahci_port_base(link->ap);
1014 int offset = ahci_scr_offset(link->ap, sc_reg);
1015
1016 if (offset) {
1017 *val = readl(port_mmio + offset);
1018 return 0;
1019 }
1020 return -EINVAL;
1021 }
1022
1023 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1024 {
1025 void __iomem *port_mmio = ahci_port_base(link->ap);
1026 int offset = ahci_scr_offset(link->ap, sc_reg);
1027
1028 if (offset) {
1029 writel(val, port_mmio + offset);
1030 return 0;
1031 }
1032 return -EINVAL;
1033 }
1034
1035 static void ahci_start_engine(struct ata_port *ap)
1036 {
1037 void __iomem *port_mmio = ahci_port_base(ap);
1038 u32 tmp;
1039
1040 /* start DMA */
1041 tmp = readl(port_mmio + PORT_CMD);
1042 tmp |= PORT_CMD_START;
1043 writel(tmp, port_mmio + PORT_CMD);
1044 readl(port_mmio + PORT_CMD); /* flush */
1045 }
1046
1047 static int ahci_stop_engine(struct ata_port *ap)
1048 {
1049 void __iomem *port_mmio = ahci_port_base(ap);
1050 u32 tmp;
1051
1052 tmp = readl(port_mmio + PORT_CMD);
1053
1054 /* check if the HBA is idle */
1055 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
1056 return 0;
1057
1058 /* setting HBA to idle */
1059 tmp &= ~PORT_CMD_START;
1060 writel(tmp, port_mmio + PORT_CMD);
1061
1062 /* wait for engine to stop. This could be as long as 500 msec */
1063 tmp = ata_wait_register(port_mmio + PORT_CMD,
1064 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1065 if (tmp & PORT_CMD_LIST_ON)
1066 return -EIO;
1067
1068 return 0;
1069 }
1070
1071 static void ahci_start_fis_rx(struct ata_port *ap)
1072 {
1073 void __iomem *port_mmio = ahci_port_base(ap);
1074 struct ahci_host_priv *hpriv = ap->host->private_data;
1075 struct ahci_port_priv *pp = ap->private_data;
1076 u32 tmp;
1077
1078 /* set FIS registers */
1079 if (hpriv->cap & HOST_CAP_64)
1080 writel((pp->cmd_slot_dma >> 16) >> 16,
1081 port_mmio + PORT_LST_ADDR_HI);
1082 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1083
1084 if (hpriv->cap & HOST_CAP_64)
1085 writel((pp->rx_fis_dma >> 16) >> 16,
1086 port_mmio + PORT_FIS_ADDR_HI);
1087 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1088
1089 /* enable FIS reception */
1090 tmp = readl(port_mmio + PORT_CMD);
1091 tmp |= PORT_CMD_FIS_RX;
1092 writel(tmp, port_mmio + PORT_CMD);
1093
1094 /* flush */
1095 readl(port_mmio + PORT_CMD);
1096 }
1097
1098 static int ahci_stop_fis_rx(struct ata_port *ap)
1099 {
1100 void __iomem *port_mmio = ahci_port_base(ap);
1101 u32 tmp;
1102
1103 /* disable FIS reception */
1104 tmp = readl(port_mmio + PORT_CMD);
1105 tmp &= ~PORT_CMD_FIS_RX;
1106 writel(tmp, port_mmio + PORT_CMD);
1107
1108 /* wait for completion, spec says 500ms, give it 1000 */
1109 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1110 PORT_CMD_FIS_ON, 10, 1000);
1111 if (tmp & PORT_CMD_FIS_ON)
1112 return -EBUSY;
1113
1114 return 0;
1115 }
1116
1117 static void ahci_power_up(struct ata_port *ap)
1118 {
1119 struct ahci_host_priv *hpriv = ap->host->private_data;
1120 void __iomem *port_mmio = ahci_port_base(ap);
1121 u32 cmd;
1122
1123 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1124
1125 /* spin up device */
1126 if (hpriv->cap & HOST_CAP_SSS) {
1127 cmd |= PORT_CMD_SPIN_UP;
1128 writel(cmd, port_mmio + PORT_CMD);
1129 }
1130
1131 /* wake up link */
1132 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1133 }
1134
1135 static void ahci_disable_alpm(struct ata_port *ap)
1136 {
1137 struct ahci_host_priv *hpriv = ap->host->private_data;
1138 void __iomem *port_mmio = ahci_port_base(ap);
1139 u32 cmd;
1140 struct ahci_port_priv *pp = ap->private_data;
1141
1142 /* IPM bits should be disabled by libata-core */
1143 /* get the existing command bits */
1144 cmd = readl(port_mmio + PORT_CMD);
1145
1146 /* disable ALPM and ASP */
1147 cmd &= ~PORT_CMD_ASP;
1148 cmd &= ~PORT_CMD_ALPE;
1149
1150 /* force the interface back to active */
1151 cmd |= PORT_CMD_ICC_ACTIVE;
1152
1153 /* write out new cmd value */
1154 writel(cmd, port_mmio + PORT_CMD);
1155 cmd = readl(port_mmio + PORT_CMD);
1156
1157 /* wait 10ms to be sure we've come out of any low power state */
1158 msleep(10);
1159
1160 /* clear out any PhyRdy stuff from interrupt status */
1161 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1162
1163 /* go ahead and clean out PhyRdy Change from Serror too */
1164 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1165
1166 /*
1167 * Clear flag to indicate that we should ignore all PhyRdy
1168 * state changes
1169 */
1170 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1171
1172 /*
1173 * Enable interrupts on Phy Ready.
1174 */
1175 pp->intr_mask |= PORT_IRQ_PHYRDY;
1176 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1177
1178 /*
1179 * don't change the link pm policy - we can be called
1180 * just to turn off link pm temporarily
1181 */
1182 }
1183
1184 static int ahci_enable_alpm(struct ata_port *ap,
1185 enum link_pm policy)
1186 {
1187 struct ahci_host_priv *hpriv = ap->host->private_data;
1188 void __iomem *port_mmio = ahci_port_base(ap);
1189 u32 cmd;
1190 struct ahci_port_priv *pp = ap->private_data;
1191 u32 asp;
1192
1193 /* Make sure the host is capable of link power management */
1194 if (!(hpriv->cap & HOST_CAP_ALPM))
1195 return -EINVAL;
1196
1197 switch (policy) {
1198 case MAX_PERFORMANCE:
1199 case NOT_AVAILABLE:
1200 /*
1201 * if we came here with NOT_AVAILABLE,
1202 * it just means this is the first time we
1203 * have tried to enable - default to max performance,
1204 * and let the user go to lower power modes on request.
1205 */
1206 ahci_disable_alpm(ap);
1207 return 0;
1208 case MIN_POWER:
1209 /* configure HBA to enter SLUMBER */
1210 asp = PORT_CMD_ASP;
1211 break;
1212 case MEDIUM_POWER:
1213 /* configure HBA to enter PARTIAL */
1214 asp = 0;
1215 break;
1216 default:
1217 return -EINVAL;
1218 }
1219
1220 /*
1221 * Disable interrupts on Phy Ready. This keeps us from
1222 * getting woken up due to spurious phy ready interrupts
1223 * TBD - Hot plug should be done via polling now, is
1224 * that even supported?
1225 */
1226 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1227 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1228
1229 /*
1230 * Set a flag to indicate that we should ignore all PhyRdy
1231 * state changes since these can happen now whenever we
1232 * change link state
1233 */
1234 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1235
1236 /* get the existing command bits */
1237 cmd = readl(port_mmio + PORT_CMD);
1238
1239 /*
1240 * Set ASP based on Policy
1241 */
1242 cmd |= asp;
1243
1244 /*
1245 * Setting this bit will instruct the HBA to aggressively
1246 * enter a lower power link state when it's appropriate and
1247 * based on the value set above for ASP
1248 */
1249 cmd |= PORT_CMD_ALPE;
1250
1251 /* write out new cmd value */
1252 writel(cmd, port_mmio + PORT_CMD);
1253 cmd = readl(port_mmio + PORT_CMD);
1254
1255 /* IPM bits should be set by libata-core */
1256 return 0;
1257 }
1258
1259 #ifdef CONFIG_PM
1260 static void ahci_power_down(struct ata_port *ap)
1261 {
1262 struct ahci_host_priv *hpriv = ap->host->private_data;
1263 void __iomem *port_mmio = ahci_port_base(ap);
1264 u32 cmd, scontrol;
1265
1266 if (!(hpriv->cap & HOST_CAP_SSS))
1267 return;
1268
1269 /* put device into listen mode, first set PxSCTL.DET to 0 */
1270 scontrol = readl(port_mmio + PORT_SCR_CTL);
1271 scontrol &= ~0xf;
1272 writel(scontrol, port_mmio + PORT_SCR_CTL);
1273
1274 /* then set PxCMD.SUD to 0 */
1275 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1276 cmd &= ~PORT_CMD_SPIN_UP;
1277 writel(cmd, port_mmio + PORT_CMD);
1278 }
1279 #endif
1280
1281 static void ahci_start_port(struct ata_port *ap)
1282 {
1283 struct ahci_port_priv *pp = ap->private_data;
1284 struct ata_link *link;
1285 struct ahci_em_priv *emp;
1286 ssize_t rc;
1287 int i;
1288
1289 /* enable FIS reception */
1290 ahci_start_fis_rx(ap);
1291
1292 /* enable DMA */
1293 ahci_start_engine(ap);
1294
1295 /* turn on LEDs */
1296 if (ap->flags & ATA_FLAG_EM) {
1297 ata_for_each_link(link, ap, EDGE) {
1298 emp = &pp->em_priv[link->pmp];
1299
1300 /* EM Transmit bit may be busy during init */
1301 for (i = 0; i < EM_MAX_RETRY; i++) {
1302 rc = ahci_transmit_led_message(ap,
1303 emp->led_state,
1304 4);
1305 if (rc == -EBUSY)
1306 msleep(1);
1307 else
1308 break;
1309 }
1310 }
1311 }
1312
1313 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1314 ata_for_each_link(link, ap, EDGE)
1315 ahci_init_sw_activity(link);
1316
1317 }
1318
1319 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1320 {
1321 int rc;
1322
1323 /* disable DMA */
1324 rc = ahci_stop_engine(ap);
1325 if (rc) {
1326 *emsg = "failed to stop engine";
1327 return rc;
1328 }
1329
1330 /* disable FIS reception */
1331 rc = ahci_stop_fis_rx(ap);
1332 if (rc) {
1333 *emsg = "failed stop FIS RX";
1334 return rc;
1335 }
1336
1337 return 0;
1338 }
1339
1340 static int ahci_reset_controller(struct ata_host *host)
1341 {
1342 struct pci_dev *pdev = to_pci_dev(host->dev);
1343 struct ahci_host_priv *hpriv = host->private_data;
1344 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1345 u32 tmp;
1346
1347 /* we must be in AHCI mode, before using anything
1348 * AHCI-specific, such as HOST_RESET.
1349 */
1350 ahci_enable_ahci(mmio);
1351
1352 /* global controller reset */
1353 if (!ahci_skip_host_reset) {
1354 tmp = readl(mmio + HOST_CTL);
1355 if ((tmp & HOST_RESET) == 0) {
1356 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1357 readl(mmio + HOST_CTL); /* flush */
1358 }
1359
1360 /*
1361 * To perform a host reset, the OS should set HOST_RESET
1362 * and poll until the bit reads back as "0".  The reset
1363 * must complete within 1 second, or the hardware should
1364 * be considered fried.
1365 */
1366 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1367 HOST_RESET, 10, 1000);
1368
1369 if (tmp & HOST_RESET) {
1370 dev_printk(KERN_ERR, host->dev,
1371 "controller reset failed (0x%x)\n", tmp);
1372 return -EIO;
1373 }
1374
1375 /* turn on AHCI mode */
1376 ahci_enable_ahci(mmio);
1377
1378 /* Some registers might be cleared on reset. Restore
1379 * initial values.
1380 */
1381 ahci_restore_initial_config(host);
1382 } else
1383 dev_printk(KERN_INFO, host->dev,
1384 "skipping global host reset\n");
1385
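/*
 * On Intel AHCI parts the low bits of the PCS register at PCI config
 * offset 0x92 act as per-port enables; the code below ORs in port_map
 * so that every implemented port is still enabled after reset.
 */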
1386 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1387 u16 tmp16;
1388
1389 /* configure PCS */
1390 pci_read_config_word(pdev, 0x92, &tmp16);
1391 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1392 tmp16 |= hpriv->port_map;
1393 pci_write_config_word(pdev, 0x92, tmp16);
1394 }
1395 }
1396
1397 return 0;
1398 }
1399
1400 static void ahci_sw_activity(struct ata_link *link)
1401 {
1402 struct ata_port *ap = link->ap;
1403 struct ahci_port_priv *pp = ap->private_data;
1404 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1405
1406 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1407 return;
1408
1409 emp->activity++;
1410 if (!timer_pending(&emp->timer))
1411 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1412 }
1413
1414 static void ahci_sw_activity_blink(unsigned long arg)
1415 {
1416 struct ata_link *link = (struct ata_link *)arg;
1417 struct ata_port *ap = link->ap;
1418 struct ahci_port_priv *pp = ap->private_data;
1419 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1420 unsigned long led_message = emp->led_state;
1421 u32 activity_led_state;
1422 unsigned long flags;
1423
1424 led_message &= EM_MSG_LED_VALUE;
1425 led_message |= ap->port_no | (link->pmp << 8);
1426
1427 /* check to see if we've had activity. If so,
1428 * toggle state of LED and reset timer. If not,
1429 * turn LED to desired idle state.
1430 */
1431 spin_lock_irqsave(ap->lock, flags);
1432 if (emp->saved_activity != emp->activity) {
1433 emp->saved_activity = emp->activity;
1434 /* get the current LED state */
1435 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1436
1437 if (activity_led_state)
1438 activity_led_state = 0;
1439 else
1440 activity_led_state = 1;
1441
1442 /* clear old state */
1443 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1444
1445 /* toggle state */
1446 led_message |= (activity_led_state << 16);
1447 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1448 } else {
1449 /* switch to idle */
1450 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1451 if (emp->blink_policy == BLINK_OFF)
1452 led_message |= (1 << 16);
1453 }
1454 spin_unlock_irqrestore(ap->lock, flags);
1455 ahci_transmit_led_message(ap, led_message, 4);
1456 }
1457
1458 static void ahci_init_sw_activity(struct ata_link *link)
1459 {
1460 struct ata_port *ap = link->ap;
1461 struct ahci_port_priv *pp = ap->private_data;
1462 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1463
1464 /* init activity stats, setup timer */
1465 emp->saved_activity = emp->activity = 0;
1466 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1467
1468 /* check our blink policy and set flag for link if it's enabled */
1469 if (emp->blink_policy)
1470 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1471 }
1472
1473 static int ahci_reset_em(struct ata_host *host)
1474 {
1475 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1476 u32 em_ctl;
1477
1478 em_ctl = readl(mmio + HOST_EM_CTL);
1479 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1480 return -EINVAL;
1481
1482 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1483 return 0;
1484 }
1485
1486 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1487 ssize_t size)
1488 {
1489 struct ahci_host_priv *hpriv = ap->host->private_data;
1490 struct ahci_port_priv *pp = ap->private_data;
1491 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1492 u32 em_ctl;
1493 u32 message[] = {0, 0};
1494 unsigned long flags;
1495 int pmp;
1496 struct ahci_em_priv *emp;
1497
1498 /* get the slot number from the message */
1499 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1500 if (pmp < EM_MAX_SLOTS)
1501 emp = &pp->em_priv[pmp];
1502 else
1503 return -EINVAL;
1504
1505 spin_lock_irqsave(ap->lock, flags);
1506
1507 /*
1508 * if we are still busy transmitting a previous message,
1509 * do not allow a new message to be sent
1510 */
1511 em_ctl = readl(mmio + HOST_EM_CTL);
1512 if (em_ctl & EM_CTL_TM) {
1513 spin_unlock_irqrestore(ap->lock, flags);
1514 return -EBUSY;
1515 }
1516
1517 /*
1518 * create message header - this is all zero except for
1519 * the message size, which is 4 bytes.
1520 */
1521 message[0] |= (4 << 8);
1522
1523 /* ignore bits 0:3 of byte zero, fill in the port number ourselves */
1524 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1525
1526 /* write message to EM_LOC */
1527 writel(message[0], mmio + hpriv->em_loc);
1528 writel(message[1], mmio + hpriv->em_loc+4);
1529
1530 /* save off new led state for port/slot */
1531 emp->led_state = state;
1532
1533 /*
1534 * tell hardware to transmit the message
1535 */
1536 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1537
1538 spin_unlock_irqrestore(ap->lock, flags);
1539 return size;
1540 }
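/*
 * Example of the LED message layout defined by the EM_MSG_LED_* masks:
 * dword 0 is the header with the payload size (4) in bits 15:8, and
 * dword 1 packs the HBA port number in bits 3:0, the PMP slot in bits
 * 15:8 and the LED values in bits 31:16.  A state of 0x00010300, for
 * instance, targets PMP slot 3 and sets the LED value to
 * EM_MSG_LED_VALUE_ON once the port number has been OR'd in above.
 */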
1541
1542 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1543 {
1544 struct ahci_port_priv *pp = ap->private_data;
1545 struct ata_link *link;
1546 struct ahci_em_priv *emp;
1547 int rc = 0;
1548
1549 ata_for_each_link(link, ap, EDGE) {
1550 emp = &pp->em_priv[link->pmp];
1551 rc += sprintf(buf, "%lx\n", emp->led_state);
1552 }
1553 return rc;
1554 }
1555
1556 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1557 size_t size)
1558 {
1559 int state;
1560 int pmp;
1561 struct ahci_port_priv *pp = ap->private_data;
1562 struct ahci_em_priv *emp;
1563
1564 state = simple_strtoul(buf, NULL, 0);
1565
1566 /* get the slot number from the message */
1567 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1568 if (pmp < EM_MAX_SLOTS)
1569 emp = &pp->em_priv[pmp];
1570 else
1571 return -EINVAL;
1572
1573 /* mask off the activity bits if we are in sw_activity
1574 * mode; the user should turn off sw_activity before setting
1575 * the activity LED through em_message
1576 */
1577 if (emp->blink_policy)
1578 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1579
1580 return ahci_transmit_led_message(ap, state, size);
1581 }
1582
1583 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1584 {
1585 struct ata_link *link = dev->link;
1586 struct ata_port *ap = link->ap;
1587 struct ahci_port_priv *pp = ap->private_data;
1588 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1589 u32 port_led_state = emp->led_state;
1590
1591 /* save the desired Activity LED behavior */
1592 if (val == OFF) {
1593 /* clear LFLAG */
1594 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1595
1596 /* set the LED to OFF */
1597 port_led_state &= EM_MSG_LED_VALUE_OFF;
1598 port_led_state |= (ap->port_no | (link->pmp << 8));
1599 ahci_transmit_led_message(ap, port_led_state, 4);
1600 } else {
1601 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1602 if (val == BLINK_OFF) {
1603 /* set LED to ON for idle */
1604 port_led_state &= EM_MSG_LED_VALUE_OFF;
1605 port_led_state |= (ap->port_no | (link->pmp << 8));
1606 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1607 ahci_transmit_led_message(ap, port_led_state, 4);
1608 }
1609 }
1610 emp->blink_policy = val;
1611 return 0;
1612 }
1613
1614 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1615 {
1616 struct ata_link *link = dev->link;
1617 struct ata_port *ap = link->ap;
1618 struct ahci_port_priv *pp = ap->private_data;
1619 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1620
1621 /* display the saved value of activity behavior for this
1622 * disk.
1623 */
1624 return sprintf(buf, "%d\n", emp->blink_policy);
1625 }
1626
1627 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1628 int port_no, void __iomem *mmio,
1629 void __iomem *port_mmio)
1630 {
1631 const char *emsg = NULL;
1632 int rc;
1633 u32 tmp;
1634
1635 /* make sure port is not active */
1636 rc = ahci_deinit_port(ap, &emsg);
1637 if (rc)
1638 dev_printk(KERN_WARNING, &pdev->dev,
1639 "%s (%d)\n", emsg, rc);
1640
1641 /* clear SError */
1642 tmp = readl(port_mmio + PORT_SCR_ERR);
1643 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1644 writel(tmp, port_mmio + PORT_SCR_ERR);
1645
1646 /* clear port IRQ */
1647 tmp = readl(port_mmio + PORT_IRQ_STAT);
1648 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1649 if (tmp)
1650 writel(tmp, port_mmio + PORT_IRQ_STAT);
1651
1652 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1653 }
1654
1655 static void ahci_init_controller(struct ata_host *host)
1656 {
1657 struct ahci_host_priv *hpriv = host->private_data;
1658 struct pci_dev *pdev = to_pci_dev(host->dev);
1659 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1660 int i;
1661 void __iomem *port_mmio;
1662 u32 tmp;
1663 int mv;
1664
1665 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1666 if (pdev->device == 0x6121)
1667 mv = 2;
1668 else
1669 mv = 4;
1670 port_mmio = __ahci_port_base(host, mv);
1671
1672 writel(0, port_mmio + PORT_IRQ_MASK);
1673
1674 /* clear port IRQ */
1675 tmp = readl(port_mmio + PORT_IRQ_STAT);
1676 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1677 if (tmp)
1678 writel(tmp, port_mmio + PORT_IRQ_STAT);
1679 }
1680
1681 for (i = 0; i < host->n_ports; i++) {
1682 struct ata_port *ap = host->ports[i];
1683
1684 port_mmio = ahci_port_base(ap);
1685 if (ata_port_is_dummy(ap))
1686 continue;
1687
1688 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1689 }
1690
1691 tmp = readl(mmio + HOST_CTL);
1692 VPRINTK("HOST_CTL 0x%x\n", tmp);
1693 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1694 tmp = readl(mmio + HOST_CTL);
1695 VPRINTK("HOST_CTL 0x%x\n", tmp);
1696 }
1697
1698 static void ahci_dev_config(struct ata_device *dev)
1699 {
1700 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1701
1702 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1703 dev->max_sectors = 255;
1704 ata_dev_printk(dev, KERN_INFO,
1705 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1706 }
1707 }
1708
1709 static unsigned int ahci_dev_classify(struct ata_port *ap)
1710 {
1711 void __iomem *port_mmio = ahci_port_base(ap);
1712 struct ata_taskfile tf;
1713 u32 tmp;
1714
1715 tmp = readl(port_mmio + PORT_SIG);
1716 tf.lbah = (tmp >> 24) & 0xff;
1717 tf.lbam = (tmp >> 16) & 0xff;
1718 tf.lbal = (tmp >> 8) & 0xff;
1719 tf.nsect = (tmp) & 0xff;
1720
1721 return ata_dev_classify(&tf);
1722 }
1723
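/*
 * Fill command list slot @tag: point it at the tag's command table in
 * the port's DMA area and record the option bits (FIS length,
 * write/ATAPI flags, PMP number, PRD entry count) passed in @opts.
 */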
1724 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1725 u32 opts)
1726 {
1727 dma_addr_t cmd_tbl_dma;
1728
1729 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1730
1731 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1732 pp->cmd_slot[tag].status = 0;
1733 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1734 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1735 }
1736
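/*
 * Recover the port's command engine: stop the DMA engine, perform a
 * Command List Override if the device still reports BSY/DRQ or a PMP
 * is attached (AHCI-1.3 9.2), then restart the engine.
 */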
1737 static int ahci_kick_engine(struct ata_port *ap)
1738 {
1739 void __iomem *port_mmio = ahci_port_base(ap);
1740 struct ahci_host_priv *hpriv = ap->host->private_data;
1741 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1742 u32 tmp;
1743 int busy, rc;
1744
1745 /* stop engine */
1746 rc = ahci_stop_engine(ap);
1747 if (rc)
1748 goto out_restart;
1749
1750 /* need to do CLO?
1751 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1752 */
1753 busy = status & (ATA_BUSY | ATA_DRQ);
1754 if (!busy && !sata_pmp_attached(ap)) {
1755 rc = 0;
1756 goto out_restart;
1757 }
1758
1759 if (!(hpriv->cap & HOST_CAP_CLO)) {
1760 rc = -EOPNOTSUPP;
1761 goto out_restart;
1762 }
1763
1764 /* perform CLO */
1765 tmp = readl(port_mmio + PORT_CMD);
1766 tmp |= PORT_CMD_CLO;
1767 writel(tmp, port_mmio + PORT_CMD);
1768
1769 rc = 0;
1770 tmp = ata_wait_register(port_mmio + PORT_CMD,
1771 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1772 if (tmp & PORT_CMD_CLO)
1773 rc = -EIO;
1774
1775 /* restart engine */
1776 out_restart:
1777 ahci_start_engine(ap);
1778 return rc;
1779 }
1780
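/*
 * Build a FIS from @tf in command slot 0 and issue it.  If
 * @timeout_msec is non-zero, poll PORT_CMD_ISSUE until the command
 * completes; on timeout, kick the engine and return -EBUSY.
 */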
1781 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1782 struct ata_taskfile *tf, int is_cmd, u16 flags,
1783 unsigned long timeout_msec)
1784 {
1785 const u32 cmd_fis_len = 5; /* five dwords */
1786 struct ahci_port_priv *pp = ap->private_data;
1787 void __iomem *port_mmio = ahci_port_base(ap);
1788 u8 *fis = pp->cmd_tbl;
1789 u32 tmp;
1790
1791 /* prep the command */
1792 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1793 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1794
1795 /* issue & wait */
1796 writel(1, port_mmio + PORT_CMD_ISSUE);
1797
1798 if (timeout_msec) {
1799 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1800 1, timeout_msec);
1801 if (tmp & 0x1) {
1802 ahci_kick_engine(ap);
1803 return -EBUSY;
1804 }
1805 } else
1806 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1807
1808 return 0;
1809 }
1810
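/*
 * Common softreset helper: kick the engine, issue the two SRST
 * Register FISes (ATA_SRST set, then cleared) and wait for the link
 * to become ready using the controller-specific @check_ready callback.
 */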
1811 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1812 int pmp, unsigned long deadline,
1813 int (*check_ready)(struct ata_link *link))
1814 {
1815 struct ata_port *ap = link->ap;
1816 struct ahci_host_priv *hpriv = ap->host->private_data;
1817 const char *reason = NULL;
1818 unsigned long now, msecs;
1819 struct ata_taskfile tf;
1820 int rc;
1821
1822 DPRINTK("ENTER\n");
1823
1824 /* prepare for SRST (AHCI-1.1 10.4.1) */
1825 rc = ahci_kick_engine(ap);
1826 if (rc && rc != -EOPNOTSUPP)
1827 ata_link_printk(link, KERN_WARNING,
1828 "failed to reset engine (errno=%d)\n", rc);
1829
1830 ata_tf_init(link->device, &tf);
1831
1832 /* issue the first D2H Register FIS */
1833 msecs = 0;
1834 now = jiffies;
1835 if (time_after(deadline, now))
1836 msecs = jiffies_to_msecs(deadline - now);
1837
1838 tf.ctl |= ATA_SRST;
1839 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1840 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1841 rc = -EIO;
1842 reason = "1st FIS failed";
1843 goto fail;
1844 }
1845
1846 /* spec says at least 5us, but be generous and sleep for 1ms */
1847 msleep(1);
1848
1849 /* issue the second D2H Register FIS */
1850 tf.ctl &= ~ATA_SRST;
1851 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1852
1853 /* wait for link to become ready */
1854 rc = ata_wait_after_reset(link, deadline, check_ready);
1855 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1856 /*
1857 * Workaround for cases where link online status can't
1858 * be trusted. Treat device readiness timeout as link
1859 * offline.
1860 */
1861 ata_link_printk(link, KERN_INFO,
1862 "device not ready, treating as offline\n");
1863 *class = ATA_DEV_NONE;
1864 } else if (rc) {
1865 /* link occupied, -ENODEV too is an error */
1866 reason = "device not ready";
1867 goto fail;
1868 } else
1869 *class = ahci_dev_classify(ap);
1870
1871 DPRINTK("EXIT, class=%u\n", *class);
1872 return 0;
1873
1874 fail:
1875 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1876 return rc;
1877 }
1878
1879 static int ahci_check_ready(struct ata_link *link)
1880 {
1881 void __iomem *port_mmio = ahci_port_base(link->ap);
1882 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1883
1884 return ata_check_ready(status);
1885 }
1886
1887 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1888 unsigned long deadline)
1889 {
1890 int pmp = sata_srst_pmp(link);
1891
1892 DPRINTK("ENTER\n");
1893
1894 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1895 }
1896
1897 static int ahci_sb600_check_ready(struct ata_link *link)
1898 {
1899 void __iomem *port_mmio = ahci_port_base(link->ap);
1900 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1901 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1902
1903 /*
1904 * There is no need to check TFDATA if BAD PMP is found due to a HW bug;
1905 * skipping the check saves the timeout delay.
1906 */
1907 if (irq_status & PORT_IRQ_BAD_PMP)
1908 return -EIO;
1909
1910 return ata_check_ready(status);
1911 }
1912
1913 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1914 unsigned long deadline)
1915 {
1916 struct ata_port *ap = link->ap;
1917 void __iomem *port_mmio = ahci_port_base(ap);
1918 int pmp = sata_srst_pmp(link);
1919 int rc;
1920 u32 irq_sts;
1921
1922 DPRINTK("ENTER\n");
1923
1924 rc = ahci_do_softreset(link, class, pmp, deadline,
1925 ahci_sb600_check_ready);
1926
1927 /*
1928 * Soft reset fails on some ATI chips with IPMS set when PMP
1929 * is enabled but a SATA HDD/ODD is connected to a SATA port;
1930 * do the soft reset again on port 0.
1931 */
1932 if (rc == -EIO) {
1933 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1934 if (irq_sts & PORT_IRQ_BAD_PMP) {
1935 ata_link_printk(link, KERN_WARNING,
1936 "applying SB600 PMP SRST workaround "
1937 "and retrying\n");
1938 rc = ahci_do_softreset(link, class, 0, deadline,
1939 ahci_check_ready);
1940 }
1941 }
1942
1943 return rc;
1944 }
1945
1946 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1947 unsigned long deadline)
1948 {
1949 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1950 struct ata_port *ap = link->ap;
1951 struct ahci_port_priv *pp = ap->private_data;
1952 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1953 struct ata_taskfile tf;
1954 bool online;
1955 int rc;
1956
1957 DPRINTK("ENTER\n");
1958
1959 ahci_stop_engine(ap);
1960
1961 /* clear D2H reception area to properly wait for D2H FIS */
1962 ata_tf_init(link->device, &tf);
1963 tf.command = 0x80;
1964 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1965
1966 rc = sata_link_hardreset(link, timing, deadline, &online,
1967 ahci_check_ready);
1968
1969 ahci_start_engine(ap);
1970
1971 if (online)
1972 *class = ahci_dev_classify(ap);
1973
1974 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1975 return rc;
1976 }
1977
1978 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1979 unsigned long deadline)
1980 {
1981 struct ata_port *ap = link->ap;
1982 bool online;
1983 int rc;
1984
1985 DPRINTK("ENTER\n");
1986
1987 ahci_stop_engine(ap);
1988
1989 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1990 deadline, &online, NULL);
1991
1992 ahci_start_engine(ap);
1993
1994 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1995
1996 /* vt8251 doesn't clear BSY on signature FIS reception;
1997 * request a follow-up softreset.
1998 */
1999 return online ? -EAGAIN : rc;
2000 }
2001
2002 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
2003 unsigned long deadline)
2004 {
2005 struct ata_port *ap = link->ap;
2006 struct ahci_port_priv *pp = ap->private_data;
2007 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2008 struct ata_taskfile tf;
2009 bool online;
2010 int rc;
2011
2012 ahci_stop_engine(ap);
2013
2014 /* clear D2H reception area to properly wait for D2H FIS */
2015 ata_tf_init(link->device, &tf);
2016 tf.command = 0x80;
2017 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
2018
2019 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
2020 deadline, &online, NULL);
2021
2022 ahci_start_engine(ap);
2023
2024 /* The pseudo configuration device on SIMG4726 attached to
2025 * ASUS P5W-DH Deluxe doesn't send signature FIS after
2026 * hardreset if no device is attached to the first downstream
2027 * port && the pseudo device locks up on SRST w/ PMP==0. To
2028 * work around this, wait for !BSY only briefly. If BSY isn't
2029 * cleared, perform CLO and proceed to IDENTIFY (achieved by
2030 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
2031 *
2032 * Wait for two seconds. Devices attached to the downstream port
2033 * which can't process the following IDENTIFY after this will
2034 * have to be reset again. For most cases, this should
2035 * suffice while keeping probing snappy enough.
2036 */
2037 if (online) {
2038 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
2039 ahci_check_ready);
2040 if (rc)
2041 ahci_kick_engine(ap);
2042 }
2043 return rc;
2044 }
2045
2046 static void ahci_postreset(struct ata_link *link, unsigned int *class)
2047 {
2048 struct ata_port *ap = link->ap;
2049 void __iomem *port_mmio = ahci_port_base(ap);
2050 u32 new_tmp, tmp;
2051
2052 ata_std_postreset(link, class);
2053
2054 /* Make sure port's ATAPI bit is set appropriately */
2055 new_tmp = tmp = readl(port_mmio + PORT_CMD);
2056 if (*class == ATA_DEV_ATAPI)
2057 new_tmp |= PORT_CMD_ATAPI;
2058 else
2059 new_tmp &= ~PORT_CMD_ATAPI;
2060 if (new_tmp != tmp) {
2061 writel(new_tmp, port_mmio + PORT_CMD);
2062 readl(port_mmio + PORT_CMD); /* flush */
2063 }
2064 }
2065
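/*
 * Translate the qc's scatter/gather list into the PRD table that
 * follows the command FIS in the command table; returns the number of
 * PRD entries written.
 */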
2066 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2067 {
2068 struct scatterlist *sg;
2069 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2070 unsigned int si;
2071
2072 VPRINTK("ENTER\n");
2073
2074 /*
2075 * Next, the S/G list.
2076 */
2077 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2078 dma_addr_t addr = sg_dma_address(sg);
2079 u32 sg_len = sg_dma_len(sg);
2080
2081 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2082 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2083 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
2084 }
2085
2086 return si;
2087 }
2088
2089 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
2090 {
2091 struct ata_port *ap = qc->ap;
2092 struct ahci_port_priv *pp = ap->private_data;
2093
2094 if (!sata_pmp_attached(ap) || pp->fbs_enabled)
2095 return ata_std_qc_defer(qc);
2096 else
2097 return sata_pmp_qc_defer_cmd_switch(qc);
2098 }
2099
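/*
 * Prepare a queued command: write the command FIS (and the ATAPI CDB
 * if needed) into the command table, build the S/G list, then fill
 * the command slot with the resulting options.
 */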
2100 static void ahci_qc_prep(struct ata_queued_cmd *qc)
2101 {
2102 struct ata_port *ap = qc->ap;
2103 struct ahci_port_priv *pp = ap->private_data;
2104 int is_atapi = ata_is_atapi(qc->tf.protocol);
2105 void *cmd_tbl;
2106 u32 opts;
2107 const u32 cmd_fis_len = 5; /* five dwords */
2108 unsigned int n_elem;
2109
2110 /*
2111 * Fill in command table information. First, the header,
2112 * a SATA Register - Host to Device command FIS.
2113 */
2114 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2115
2116 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2117 if (is_atapi) {
2118 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2119 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2120 }
2121
2122 n_elem = 0;
2123 if (qc->flags & ATA_QCFLAG_DMAMAP)
2124 n_elem = ahci_fill_sg(qc, cmd_tbl);
2125
2126 /*
2127 * Fill in command slot information.
2128 */
2129 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2130 if (qc->tf.flags & ATA_TFLAG_WRITE)
2131 opts |= AHCI_CMD_WRITE;
2132 if (is_atapi)
2133 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2134
2135 ahci_fill_cmd_slot(pp, qc->tag, opts);
2136 }
2137
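/*
 * Clear a device error on an FBS-enabled port by setting PORT_FBS_DEC
 * and briefly waiting for the HBA to clear it again.
 */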
2138 static void ahci_fbs_dec_intr(struct ata_port *ap)
2139 {
2140 struct ahci_port_priv *pp = ap->private_data;
2141 void __iomem *port_mmio = ahci_port_base(ap);
2142 u32 fbs = readl(port_mmio + PORT_FBS);
2143 int retries = 3;
2144
2145 DPRINTK("ENTER\n");
2146 BUG_ON(!pp->fbs_enabled);
2147
2148 /* the time to wait for DEC is not specified by the AHCI spec,
2149 * so add a retry loop for safety.
2150 */
2151 writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
2152 fbs = readl(port_mmio + PORT_FBS);
2153 while ((fbs & PORT_FBS_DEC) && retries--) {
2154 udelay(1);
2155 fbs = readl(port_mmio + PORT_FBS);
2156 }
2157
2158 if (fbs & PORT_FBS_DEC)
2159 dev_printk(KERN_ERR, ap->host->dev,
2160 "failed to clear device error\n");
2161 }
2162
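/*
 * Handle an error interrupt: determine which link the error belongs
 * to (via PORT_FBS when FBS is enabled), clear and record SError, map
 * the IRQ status bits to error masks and EH actions, then freeze or
 * abort the port as appropriate.
 */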
2163 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2164 {
2165 struct ahci_host_priv *hpriv = ap->host->private_data;
2166 struct ahci_port_priv *pp = ap->private_data;
2167 struct ata_eh_info *host_ehi = &ap->link.eh_info;
2168 struct ata_link *link = NULL;
2169 struct ata_queued_cmd *active_qc;
2170 struct ata_eh_info *active_ehi;
2171 bool fbs_need_dec = false;
2172 u32 serror;
2173
2174 /* determine active link with error */
2175 if (pp->fbs_enabled) {
2176 void __iomem *port_mmio = ahci_port_base(ap);
2177 u32 fbs = readl(port_mmio + PORT_FBS);
2178 int pmp = fbs >> PORT_FBS_DWE_OFFSET;
2179
2180 if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
2181 ata_link_online(&ap->pmp_link[pmp])) {
2182 link = &ap->pmp_link[pmp];
2183 fbs_need_dec = true;
2184 }
2185
2186 } else
2187 ata_for_each_link(link, ap, EDGE)
2188 if (ata_link_active(link))
2189 break;
2190
2191 if (!link)
2192 link = &ap->link;
2193
2194 active_qc = ata_qc_from_tag(ap, link->active_tag);
2195 active_ehi = &link->eh_info;
2196
2197 /* record irq stat */
2198 ata_ehi_clear_desc(host_ehi);
2199 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2200
2201 /* AHCI needs SError cleared; otherwise, it might lock up */
2202 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2203 ahci_scr_write(&ap->link, SCR_ERROR, serror);
2204 host_ehi->serror |= serror;
2205
2206 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2207 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2208 irq_stat &= ~PORT_IRQ_IF_ERR;
2209
2210 if (irq_stat & PORT_IRQ_TF_ERR) {
2211 /* If qc is active, charge it; otherwise, the active
2212 * link. There's no active qc on NCQ errors. It will
2213 * be determined by EH by reading log page 10h.
2214 */
2215 if (active_qc)
2216 active_qc->err_mask |= AC_ERR_DEV;
2217 else
2218 active_ehi->err_mask |= AC_ERR_DEV;
2219
2220 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2221 host_ehi->serror &= ~SERR_INTERNAL;
2222 }
2223
2224 if (irq_stat & PORT_IRQ_UNK_FIS) {
2225 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2226
2227 active_ehi->err_mask |= AC_ERR_HSM;
2228 active_ehi->action |= ATA_EH_RESET;
2229 ata_ehi_push_desc(active_ehi,
2230 "unknown FIS %08x %08x %08x %08x" ,
2231 unk[0], unk[1], unk[2], unk[3]);
2232 }
2233
2234 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2235 active_ehi->err_mask |= AC_ERR_HSM;
2236 active_ehi->action |= ATA_EH_RESET;
2237 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2238 }
2239
2240 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2241 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2242 host_ehi->action |= ATA_EH_RESET;
2243 ata_ehi_push_desc(host_ehi, "host bus error");
2244 }
2245
2246 if (irq_stat & PORT_IRQ_IF_ERR) {
2247 if (fbs_need_dec)
2248 active_ehi->err_mask |= AC_ERR_DEV;
2249 else {
2250 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2251 host_ehi->action |= ATA_EH_RESET;
2252 }
2253
2254 ata_ehi_push_desc(host_ehi, "interface fatal error");
2255 }
2256
2257 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2258 ata_ehi_hotplugged(host_ehi);
2259 ata_ehi_push_desc(host_ehi, "%s",
2260 irq_stat & PORT_IRQ_CONNECT ?
2261 "connection status changed" : "PHY RDY changed");
2262 }
2263
2264 /* okay, let's hand over to EH */
2265
2266 if (irq_stat & PORT_IRQ_FREEZE)
2267 ata_port_freeze(ap);
2268 else if (fbs_need_dec) {
2269 ata_link_abort(link);
2270 ahci_fbs_dec_intr(ap);
2271 } else
2272 ata_port_abort(ap);
2273 }
2274
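/*
 * Per-port interrupt handler: read and clear PORT_IRQ_STAT, hand
 * errors to ahci_error_intr(), handle SDB-FIS notification and
 * complete finished commands via ata_qc_complete_multiple().
 */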
2275 static void ahci_port_intr(struct ata_port *ap)
2276 {
2277 void __iomem *port_mmio = ahci_port_base(ap);
2278 struct ata_eh_info *ehi = &ap->link.eh_info;
2279 struct ahci_port_priv *pp = ap->private_data;
2280 struct ahci_host_priv *hpriv = ap->host->private_data;
2281 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2282 u32 status, qc_active = 0;
2283 int rc;
2284
2285 status = readl(port_mmio + PORT_IRQ_STAT);
2286 writel(status, port_mmio + PORT_IRQ_STAT);
2287
2288 /* ignore BAD_PMP while resetting */
2289 if (unlikely(resetting))
2290 status &= ~PORT_IRQ_BAD_PMP;
2291
2292 /* If we are getting PhyRdy, this is
2293 * just a power state change; drop the
2294 * IRQ bit here and clear the PhyRdy/Comm
2295 * Wake bits from SError
2296 */
2297 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2298 (status & PORT_IRQ_PHYRDY)) {
2299 status &= ~PORT_IRQ_PHYRDY;
2300 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2301 }
2302
2303 if (unlikely(status & PORT_IRQ_ERROR)) {
2304 ahci_error_intr(ap, status);
2305 return;
2306 }
2307
2308 if (status & PORT_IRQ_SDB_FIS) {
2309 /* If SNotification is available, leave notification
2310 * handling to sata_async_notification(). If not,
2311 * emulate it by snooping SDB FIS RX area.
2312 *
2313 * Snooping FIS RX area is probably cheaper than
2314 * poking SNotification but some controllers which
2315 * implement SNotification, ICH9 for example, don't
2316 * store AN SDB FIS into receive area.
2317 */
2318 if (hpriv->cap & HOST_CAP_SNTF)
2319 sata_async_notification(ap);
2320 else {
2321 /* If the 'N' bit in word 0 of the FIS is set,
2322 * we just received asynchronous notification.
2323 * Tell libata about it.
2324 *
2325 * Lack of SNotification should not appear in
2326 * ahci 1.2, so the workaround is unnecessary
2327 * when FBS is enabled.
2328 */
2329 if (pp->fbs_enabled)
2330 WARN_ON_ONCE(1);
2331 else {
2332 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2333 u32 f0 = le32_to_cpu(f[0]);
2334 if (f0 & (1 << 15))
2335 sata_async_notification(ap);
2336 }
2337 }
2338 }
2339
2340 /* pp->active_link is not reliable once FBS is enabled; both
2341 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
2342 * NCQ and non-NCQ commands may be in flight at the same time.
2343 */
2344 if (pp->fbs_enabled) {
2345 if (ap->qc_active) {
2346 qc_active = readl(port_mmio + PORT_SCR_ACT);
2347 qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
2348 }
2349 } else {
2350 /* pp->active_link is valid iff any command is in flight */
2351 if (ap->qc_active && pp->active_link->sactive)
2352 qc_active = readl(port_mmio + PORT_SCR_ACT);
2353 else
2354 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2355 }
2356
2357 rc = ata_qc_complete_multiple(ap, qc_active);
2358
2359 /* while resetting, invalid completions are expected */
2360 if (unlikely(rc < 0 && !resetting)) {
2361 ehi->err_mask |= AC_ERR_HSM;
2362 ehi->action |= ATA_EH_RESET;
2363 ata_port_freeze(ap);
2364 }
2365 }
2366
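/*
 * Top-level interrupt handler: read HOST_IRQ_STAT, dispatch each
 * pending port to ahci_port_intr() under the host lock, then clear
 * HOST_IRQ_STAT last (see AHCI 1.1 section 10.6.2).
 */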
2367 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2368 {
2369 struct ata_host *host = dev_instance;
2370 struct ahci_host_priv *hpriv;
2371 unsigned int i, handled = 0;
2372 void __iomem *mmio;
2373 u32 irq_stat, irq_masked;
2374
2375 VPRINTK("ENTER\n");
2376
2377 hpriv = host->private_data;
2378 mmio = host->iomap[AHCI_PCI_BAR];
2379
2380 /* sigh. 0xffffffff is a valid return from h/w */
2381 irq_stat = readl(mmio + HOST_IRQ_STAT);
2382 if (!irq_stat)
2383 return IRQ_NONE;
2384
2385 irq_masked = irq_stat & hpriv->port_map;
2386
2387 spin_lock(&host->lock);
2388
2389 for (i = 0; i < host->n_ports; i++) {
2390 struct ata_port *ap;
2391
2392 if (!(irq_masked & (1 << i)))
2393 continue;
2394
2395 ap = host->ports[i];
2396 if (ap) {
2397 ahci_port_intr(ap);
2398 VPRINTK("port %u\n", i);
2399 } else {
2400 VPRINTK("port %u (no irq)\n", i);
2401 if (ata_ratelimit())
2402 dev_printk(KERN_WARNING, host->dev,
2403 "interrupt on disabled port %u\n", i);
2404 }
2405
2406 handled = 1;
2407 }
2408
2409 /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2410 * it should be cleared after all the port events are cleared;
2411 * otherwise, it will raise a spurious interrupt after each
2412 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2413 * information.
2414 *
2415 * Also, use the unmasked value to clear the interrupt, as a spurious
2416 * pending event on a dummy port might cause a screaming IRQ.
2417 */
2418 writel(irq_stat, mmio + HOST_IRQ_STAT);
2419
2420 spin_unlock(&host->lock);
2421
2422 VPRINTK("EXIT\n");
2423
2424 return IRQ_RETVAL(handled);
2425 }
2426
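/*
 * Issue a prepared command: remember the active link, set the SACT
 * bit for NCQ commands, retarget PORT_FBS if the PMP device changed,
 * then write the tag to PORT_CMD_ISSUE.
 */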
2427 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2428 {
2429 struct ata_port *ap = qc->ap;
2430 void __iomem *port_mmio = ahci_port_base(ap);
2431 struct ahci_port_priv *pp = ap->private_data;
2432
2433 /* Keep track of the currently active link. It will be used
2434 * in completion path to determine whether NCQ phase is in
2435 * progress.
2436 */
2437 pp->active_link = qc->dev->link;
2438
2439 if (qc->tf.protocol == ATA_PROT_NCQ)
2440 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2441
2442 if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
2443 u32 fbs = readl(port_mmio + PORT_FBS);
2444 fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
2445 fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
2446 writel(fbs, port_mmio + PORT_FBS);
2447 pp->fbs_last_dev = qc->dev->link->pmp;
2448 }
2449
2450 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2451
2452 ahci_sw_activity(qc->dev->link);
2453
2454 return 0;
2455 }
2456
2457 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2458 {
2459 struct ahci_port_priv *pp = qc->ap->private_data;
2460 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2461
2462 if (pp->fbs_enabled)
2463 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
2464
2465 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2466 return true;
2467 }
2468
2469 static void ahci_freeze(struct ata_port *ap)
2470 {
2471 void __iomem *port_mmio = ahci_port_base(ap);
2472
2473 /* turn IRQ off */
2474 writel(0, port_mmio + PORT_IRQ_MASK);
2475 }
2476
2477 static void ahci_thaw(struct ata_port *ap)
2478 {
2479 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2480 void __iomem *port_mmio = ahci_port_base(ap);
2481 u32 tmp;
2482 struct ahci_port_priv *pp = ap->private_data;
2483
2484 /* clear IRQ */
2485 tmp = readl(port_mmio + PORT_IRQ_STAT);
2486 writel(tmp, port_mmio + PORT_IRQ_STAT);
2487 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2488
2489 /* turn IRQ back on */
2490 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2491 }
2492
2493 static void ahci_error_handler(struct ata_port *ap)
2494 {
2495 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2496 /* restart engine */
2497 ahci_stop_engine(ap);
2498 ahci_start_engine(ap);
2499 }
2500
2501 sata_pmp_error_handler(ap);
2502 }
2503
2504 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2505 {
2506 struct ata_port *ap = qc->ap;
2507
2508 /* make DMA engine forget about the failed command */
2509 if (qc->flags & ATA_QCFLAG_FAILED)
2510 ahci_kick_engine(ap);
2511 }
2512
2513 static void ahci_enable_fbs(struct ata_port *ap)
2514 {
2515 struct ahci_port_priv *pp = ap->private_data;
2516 void __iomem *port_mmio = ahci_port_base(ap);
2517 u32 fbs;
2518 int rc;
2519
2520 if (!pp->fbs_supported)
2521 return;
2522
2523 fbs = readl(port_mmio + PORT_FBS);
2524 if (fbs & PORT_FBS_EN) {
2525 pp->fbs_enabled = true;
2526 pp->fbs_last_dev = -1; /* initialization */
2527 return;
2528 }
2529
2530 rc = ahci_stop_engine(ap);
2531 if (rc)
2532 return;
2533
2534 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
2535 fbs = readl(port_mmio + PORT_FBS);
2536 if (fbs & PORT_FBS_EN) {
2537 dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
2538 pp->fbs_enabled = true;
2539 pp->fbs_last_dev = -1; /* initialization */
2540 } else
2541 dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
2542
2543 ahci_start_engine(ap);
2544 }
2545
2546 static void ahci_disable_fbs(struct ata_port *ap)
2547 {
2548 struct ahci_port_priv *pp = ap->private_data;
2549 void __iomem *port_mmio = ahci_port_base(ap);
2550 u32 fbs;
2551 int rc;
2552
2553 if (!pp->fbs_supported)
2554 return;
2555
2556 fbs = readl(port_mmio + PORT_FBS);
2557 if ((fbs & PORT_FBS_EN) == 0) {
2558 pp->fbs_enabled = false;
2559 return;
2560 }
2561
2562 rc = ahci_stop_engine(ap);
2563 if (rc)
2564 return;
2565
2566 writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
2567 fbs = readl(port_mmio + PORT_FBS);
2568 if (fbs & PORT_FBS_EN)
2569 dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
2570 else {
2571 dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
2572 pp->fbs_enabled = false;
2573 }
2574
2575 ahci_start_engine(ap);
2576 }
2577
2578 static void ahci_pmp_attach(struct ata_port *ap)
2579 {
2580 void __iomem *port_mmio = ahci_port_base(ap);
2581 struct ahci_port_priv *pp = ap->private_data;
2582 u32 cmd;
2583
2584 cmd = readl(port_mmio + PORT_CMD);
2585 cmd |= PORT_CMD_PMP;
2586 writel(cmd, port_mmio + PORT_CMD);
2587
2588 ahci_enable_fbs(ap);
2589
2590 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2591 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2592 }
2593
2594 static void ahci_pmp_detach(struct ata_port *ap)
2595 {
2596 void __iomem *port_mmio = ahci_port_base(ap);
2597 struct ahci_port_priv *pp = ap->private_data;
2598 u32 cmd;
2599
2600 ahci_disable_fbs(ap);
2601
2602 cmd = readl(port_mmio + PORT_CMD);
2603 cmd &= ~PORT_CMD_PMP;
2604 writel(cmd, port_mmio + PORT_CMD);
2605
2606 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2607 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2608 }
2609
2610 static int ahci_port_resume(struct ata_port *ap)
2611 {
2612 ahci_power_up(ap);
2613 ahci_start_port(ap);
2614
2615 if (sata_pmp_attached(ap))
2616 ahci_pmp_attach(ap);
2617 else
2618 ahci_pmp_detach(ap);
2619
2620 return 0;
2621 }
2622
2623 #ifdef CONFIG_PM
2624 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2625 {
2626 const char *emsg = NULL;
2627 int rc;
2628
2629 rc = ahci_deinit_port(ap, &emsg);
2630 if (rc == 0)
2631 ahci_power_down(ap);
2632 else {
2633 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2634 ahci_start_port(ap);
2635 }
2636
2637 return rc;
2638 }
2639
2640 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2641 {
2642 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2643 struct ahci_host_priv *hpriv = host->private_data;
2644 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2645 u32 ctl;
2646
2647 if (mesg.event & PM_EVENT_SUSPEND &&
2648 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2649 dev_printk(KERN_ERR, &pdev->dev,
2650 "BIOS update required for suspend/resume\n");
2651 return -EIO;
2652 }
2653
2654 if (mesg.event & PM_EVENT_SLEEP) {
2655 /* AHCI spec rev1.1 section 8.3.3:
2656 * Software must disable interrupts prior to requesting a
2657 * transition of the HBA to D3 state.
2658 */
2659 ctl = readl(mmio + HOST_CTL);
2660 ctl &= ~HOST_IRQ_EN;
2661 writel(ctl, mmio + HOST_CTL);
2662 readl(mmio + HOST_CTL); /* flush */
2663 }
2664
2665 return ata_pci_device_suspend(pdev, mesg);
2666 }
2667
2668 static int ahci_pci_device_resume(struct pci_dev *pdev)
2669 {
2670 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2671 int rc;
2672
2673 rc = ata_pci_device_do_resume(pdev);
2674 if (rc)
2675 return rc;
2676
2677 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2678 rc = ahci_reset_controller(host);
2679 if (rc)
2680 return rc;
2681
2682 ahci_init_controller(host);
2683 }
2684
2685 ata_host_resume(host);
2686
2687 return 0;
2688 }
2689 #endif
2690
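/*
 * Allocate per-port state and the port's DMA area (command list,
 * received-FIS area and one command table), detect FBS support, then
 * bring the port up via ahci_port_resume().
 */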
2691 static int ahci_port_start(struct ata_port *ap)
2692 {
2693 struct ahci_host_priv *hpriv = ap->host->private_data;
2694 struct device *dev = ap->host->dev;
2695 struct ahci_port_priv *pp;
2696 void *mem;
2697 dma_addr_t mem_dma;
2698 size_t dma_sz, rx_fis_sz;
2699
2700 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2701 if (!pp)
2702 return -ENOMEM;
2703
2704 /* check FBS capability */
2705 if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
2706 void __iomem *port_mmio = ahci_port_base(ap);
2707 u32 cmd = readl(port_mmio + PORT_CMD);
2708 if (cmd & PORT_CMD_FBSCP)
2709 pp->fbs_supported = true;
2710 else
2711 dev_printk(KERN_WARNING, dev,
2712 "The port is not capable of FBS\n");
2713 }
2714
2715 if (pp->fbs_supported) {
2716 dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
2717 rx_fis_sz = AHCI_RX_FIS_SZ * 16;
2718 } else {
2719 dma_sz = AHCI_PORT_PRIV_DMA_SZ;
2720 rx_fis_sz = AHCI_RX_FIS_SZ;
2721 }
2722
2723 mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
2724 if (!mem)
2725 return -ENOMEM;
2726 memset(mem, 0, dma_sz);
2727
2728 /*
2729 * First item in chunk of DMA memory: 32-slot command table,
2730 * 32 bytes each in size
2731 */
2732 pp->cmd_slot = mem;
2733 pp->cmd_slot_dma = mem_dma;
2734
2735 mem += AHCI_CMD_SLOT_SZ;
2736 mem_dma += AHCI_CMD_SLOT_SZ;
2737
2738 /*
2739 * Second item: Received-FIS area
2740 */
2741 pp->rx_fis = mem;
2742 pp->rx_fis_dma = mem_dma;
2743
2744 mem += rx_fis_sz;
2745 mem_dma += rx_fis_sz;
2746
2747 /*
2748 * Third item: data area for storing a single command
2749 * and its scatter-gather table
2750 */
2751 pp->cmd_tbl = mem;
2752 pp->cmd_tbl_dma = mem_dma;
2753
2754 /*
2755 * Save off initial list of interrupts to be enabled.
2756 * This could be changed later
2757 */
2758 pp->intr_mask = DEF_PORT_IRQ;
2759
2760 ap->private_data = pp;
2761
2762 /* engage engines, captain */
2763 return ahci_port_resume(ap);
2764 }
2765
2766 static void ahci_port_stop(struct ata_port *ap)
2767 {
2768 const char *emsg = NULL;
2769 int rc;
2770
2771 /* de-initialize port */
2772 rc = ahci_deinit_port(ap, &emsg);
2773 if (rc)
2774 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2775 }
2776
2777 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2778 {
2779 int rc;
2780
2781 if (using_dac &&
2782 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2783 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2784 if (rc) {
2785 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2786 if (rc) {
2787 dev_printk(KERN_ERR, &pdev->dev,
2788 "64-bit DMA enable failed\n");
2789 return rc;
2790 }
2791 }
2792 } else {
2793 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2794 if (rc) {
2795 dev_printk(KERN_ERR, &pdev->dev,
2796 "32-bit DMA enable failed\n");
2797 return rc;
2798 }
2799 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2800 if (rc) {
2801 dev_printk(KERN_ERR, &pdev->dev,
2802 "32-bit consistent DMA enable failed\n");
2803 return rc;
2804 }
2805 }
2806 return 0;
2807 }
2808
2809 static void ahci_print_info(struct ata_host *host)
2810 {
2811 struct ahci_host_priv *hpriv = host->private_data;
2812 struct pci_dev *pdev = to_pci_dev(host->dev);
2813 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2814 u32 vers, cap, cap2, impl, speed;
2815 const char *speed_s;
2816 u16 cc;
2817 const char *scc_s;
2818
2819 vers = readl(mmio + HOST_VERSION);
2820 cap = hpriv->cap;
2821 cap2 = hpriv->cap2;
2822 impl = hpriv->port_map;
2823
2824 speed = (cap >> 20) & 0xf;
2825 if (speed == 1)
2826 speed_s = "1.5";
2827 else if (speed == 2)
2828 speed_s = "3";
2829 else if (speed == 3)
2830 speed_s = "6";
2831 else
2832 speed_s = "?";
2833
2834 pci_read_config_word(pdev, 0x0a, &cc);
2835 if (cc == PCI_CLASS_STORAGE_IDE)
2836 scc_s = "IDE";
2837 else if (cc == PCI_CLASS_STORAGE_SATA)
2838 scc_s = "SATA";
2839 else if (cc == PCI_CLASS_STORAGE_RAID)
2840 scc_s = "RAID";
2841 else
2842 scc_s = "unknown";
2843
2844 dev_printk(KERN_INFO, &pdev->dev,
2845 "AHCI %02x%02x.%02x%02x "
2846 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2847 ,
2848
2849 (vers >> 24) & 0xff,
2850 (vers >> 16) & 0xff,
2851 (vers >> 8) & 0xff,
2852 vers & 0xff,
2853
2854 ((cap >> 8) & 0x1f) + 1,
2855 (cap & 0x1f) + 1,
2856 speed_s,
2857 impl,
2858 scc_s);
2859
2860 dev_printk(KERN_INFO, &pdev->dev,
2861 "flags: "
2862 "%s%s%s%s%s%s%s"
2863 "%s%s%s%s%s%s%s"
2864 "%s%s%s%s%s%s\n"
2865 ,
2866
2867 cap & HOST_CAP_64 ? "64bit " : "",
2868 cap & HOST_CAP_NCQ ? "ncq " : "",
2869 cap & HOST_CAP_SNTF ? "sntf " : "",
2870 cap & HOST_CAP_MPS ? "ilck " : "",
2871 cap & HOST_CAP_SSS ? "stag " : "",
2872 cap & HOST_CAP_ALPM ? "pm " : "",
2873 cap & HOST_CAP_LED ? "led " : "",
2874 cap & HOST_CAP_CLO ? "clo " : "",
2875 cap & HOST_CAP_ONLY ? "only " : "",
2876 cap & HOST_CAP_PMP ? "pmp " : "",
2877 cap & HOST_CAP_FBS ? "fbs " : "",
2878 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2879 cap & HOST_CAP_SSC ? "slum " : "",
2880 cap & HOST_CAP_PART ? "part " : "",
2881 cap & HOST_CAP_CCC ? "ccc " : "",
2882 cap & HOST_CAP_EMS ? "ems " : "",
2883 cap & HOST_CAP_SXS ? "sxs " : "",
2884 cap2 & HOST_CAP2_APST ? "apst " : "",
2885 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2886 cap2 & HOST_CAP2_BOH ? "boh " : ""
2887 );
2888 }
2889
2890 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2891 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2892 * support PMP and the 4726 either directly exports the device
2893 * attached to the first downstream port or acts as a hardware storage
2894 * controller and emulates a single ATA device (can be RAID 0/1 or some
2895 * other configuration).
2896 *
2897 * When there's no device attached to the first downstream port of the
2898 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2899 * configure the 4726. However, ATA emulation of the device is very
2900 * lame. It doesn't send signature D2H Reg FIS after the initial
2901 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2902 *
2903 * The following function works around the problem by always using
2904 * hardreset on the port and not depending on receiving signature FIS
2905 * afterward. If signature FIS isn't received soon, ATA class is
2906 * assumed without follow-up softreset.
2907 */
2908 static void ahci_p5wdh_workaround(struct ata_host *host)
2909 {
2910 static struct dmi_system_id sysids[] = {
2911 {
2912 .ident = "P5W DH Deluxe",
2913 .matches = {
2914 DMI_MATCH(DMI_SYS_VENDOR,
2915 "ASUSTEK COMPUTER INC"),
2916 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2917 },
2918 },
2919 { }
2920 };
2921 struct pci_dev *pdev = to_pci_dev(host->dev);
2922
2923 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2924 dmi_check_system(sysids)) {
2925 struct ata_port *ap = host->ports[1];
2926
2927 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2928 "Deluxe on-board SIMG4726 workaround\n");
2929
2930 ap->ops = &ahci_p5wdh_ops;
2931 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2932 }
2933 }
2934
2935 /* only some SB600 ahci controllers can do 64bit DMA */
2936 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2937 {
2938 static const struct dmi_system_id sysids[] = {
2939 /*
2940 * The oldest version known to be broken is 0901 and the
2941 * oldest known to work is 1501, which was released on 2007-10-26.
2942 * Enable 64bit DMA on 1501 and anything newer.
2943 *
2944 * Please read bko#9412 for more info.
2945 */
2946 {
2947 .ident = "ASUS M2A-VM",
2948 .matches = {
2949 DMI_MATCH(DMI_BOARD_VENDOR,
2950 "ASUSTeK Computer INC."),
2951 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2952 },
2953 .driver_data = "20071026", /* yyyymmdd */
2954 },
2955 /*
2956 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
2957 * support 64bit DMA.
2958 *
2959 * BIOS versions earlier than 1.5 had the Manufacturer DMI
2960 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
2961 * This spelling mistake was fixed in BIOS version 1.5, so
2962 * 1.5 and later have the Manufacturer as
2963 * "MICRO-STAR INTERNATIONAL CO.,LTD".
2964 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
2965 *
2966 * BIOS versions earlier than 1.9 had a Board Product Name
2967 * DMI field of "MS-7376". This was changed to be
2968 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
2969 * match on DMI_BOARD_NAME of "MS-7376".
2970 */
2971 {
2972 .ident = "MSI K9A2 Platinum",
2973 .matches = {
2974 DMI_MATCH(DMI_BOARD_VENDOR,
2975 "MICRO-STAR INTER"),
2976 DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
2977 },
2978 },
2979 { }
2980 };
2981 const struct dmi_system_id *match;
2982 int year, month, date;
2983 char buf[9];
2984
2985 match = dmi_first_match(sysids);
2986 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2987 !match)
2988 return false;
2989
2990 if (!match->driver_data)
2991 goto enable_64bit;
2992
2993 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2994 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2995
2996 if (strcmp(buf, match->driver_data) >= 0)
2997 goto enable_64bit;
2998 else {
2999 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
3000 "forcing 32bit DMA, update BIOS\n", match->ident);
3001 return false;
3002 }
3003
3004 enable_64bit:
3005 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
3006 match->ident);
3007 return true;
3008 }
3009
3010 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
3011 {
3012 static const struct dmi_system_id broken_systems[] = {
3013 {
3014 .ident = "HP Compaq nx6310",
3015 .matches = {
3016 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3017 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
3018 },
3019 /* PCI slot number of the controller */
3020 .driver_data = (void *)0x1FUL,
3021 },
3022 {
3023 .ident = "HP Compaq 6720s",
3024 .matches = {
3025 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3026 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
3027 },
3028 /* PCI slot number of the controller */
3029 .driver_data = (void *)0x1FUL,
3030 },
3031
3032 { } /* terminate list */
3033 };
3034 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
3035
3036 if (dmi) {
3037 unsigned long slot = (unsigned long)dmi->driver_data;
3038 /* apply the quirk only to on-board controllers */
3039 return slot == PCI_SLOT(pdev->devfn);
3040 }
3041
3042 return false;
3043 }
3044
3045 static bool ahci_broken_suspend(struct pci_dev *pdev)
3046 {
3047 static const struct dmi_system_id sysids[] = {
3048 /*
3049 * On HP dv[4-6] and HDX18 with earlier BIOSen, the link
3050 * to the hard disk doesn't come online after
3051 * resuming from STR. Warn and fail suspend.
3052 *
3053 * http://bugzilla.kernel.org/show_bug.cgi?id=12276
3054 *
3055 * Use dates instead of versions to match as HP is
3056 * apparently recycling both product and version
3057 * strings.
3058 *
3059 * http://bugzilla.kernel.org/show_bug.cgi?id=15462
3060 */
3061 {
3062 .ident = "dv4",
3063 .matches = {
3064 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3065 DMI_MATCH(DMI_PRODUCT_NAME,
3066 "HP Pavilion dv4 Notebook PC"),
3067 },
3068 .driver_data = "20090105", /* F.30 */
3069 },
3070 {
3071 .ident = "dv5",
3072 .matches = {
3073 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3074 DMI_MATCH(DMI_PRODUCT_NAME,
3075 "HP Pavilion dv5 Notebook PC"),
3076 },
3077 .driver_data = "20090506", /* F.16 */
3078 },
3079 {
3080 .ident = "dv6",
3081 .matches = {
3082 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3083 DMI_MATCH(DMI_PRODUCT_NAME,
3084 "HP Pavilion dv6 Notebook PC"),
3085 },
3086 .driver_data = "20090423", /* F.21 */
3087 },
3088 {
3089 .ident = "HDX18",
3090 .matches = {
3091 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3092 DMI_MATCH(DMI_PRODUCT_NAME,
3093 "HP HDX18 Notebook PC"),
3094 },
3095 .driver_data = "20090430", /* F.23 */
3096 },
3097 /*
3098 * Acer eMachines G725 has the same problem. BIOS
3099 * V1.03 is known to be broken. V3.04 is known to
3100 * work. In between, there are V1.06, V2.06 and V3.03
3101 * that we don't have much idea about. For now,
3102 * blacklist anything older than V3.04.
3103 *
3104 * http://bugzilla.kernel.org/show_bug.cgi?id=15104
3105 */
3106 {
3107 .ident = "G725",
3108 .matches = {
3109 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
3110 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
3111 },
3112 .driver_data = "20091216", /* V3.04 */
3113 },
3114 { } /* terminate list */
3115 };
3116 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3117 int year, month, date;
3118 char buf[9];
3119
3120 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
3121 return false;
3122
3123 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
3124 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
3125
3126 return strcmp(buf, dmi->driver_data) < 0;
3127 }
3128
3129 static bool ahci_broken_online(struct pci_dev *pdev)
3130 {
3131 #define ENCODE_BUSDEVFN(bus, slot, func) \
3132 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
3133 static const struct dmi_system_id sysids[] = {
3134 /*
3135 * There are several gigabyte boards which use
3136 * SIMG5723s configured as hardware RAID. Certain
3137 * 5723 firmware revisions shipped there keep the link
3138 * online but fail to answer properly to SRST or
3139 * IDENTIFY when no device is attached downstream,
3140 * causing libata to retry quite a few times and leading
3141 * to excessive detection delay.
3142 *
3143 * As these firmwares respond to the second reset try
3144 * with an invalid device signature, considering an unknown
3145 * sig as offline works around the problem acceptably.
3146 */
3147 {
3148 .ident = "EP45-DQ6",
3149 .matches = {
3150 DMI_MATCH(DMI_BOARD_VENDOR,
3151 "Gigabyte Technology Co., Ltd."),
3152 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
3153 },
3154 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
3155 },
3156 {
3157 .ident = "EP45-DS5",
3158 .matches = {
3159 DMI_MATCH(DMI_BOARD_VENDOR,
3160 "Gigabyte Technology Co., Ltd."),
3161 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
3162 },
3163 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
3164 },
3165 { } /* terminate list */
3166 };
3167 #undef ENCODE_BUSDEVFN
3168 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3169 unsigned int val;
3170
3171 if (!dmi)
3172 return false;
3173
3174 val = (unsigned long)dmi->driver_data;
3175
3176 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
3177 }
3178
3179 #ifdef CONFIG_ATA_ACPI
3180 static void ahci_gtf_filter_workaround(struct ata_host *host)
3181 {
3182 static const struct dmi_system_id sysids[] = {
3183 /*
3184 * Aspire 3810T issues a bunch of SATA enable commands
3185 * via _GTF including an invalid one and one which is
3186 * rejected by the device. Among the successful ones
3187 * is FPDMA non-zero offset enable which when enabled
3188 * only on the drive side leads to NCQ command
3189 * failures. Filter it out.
3190 */
3191 {
3192 .ident = "Aspire 3810T",
3193 .matches = {
3194 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3195 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
3196 },
3197 .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
3198 },
3199 { }
3200 };
3201 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3202 unsigned int filter;
3203 int i;
3204
3205 if (!dmi)
3206 return;
3207
3208 filter = (unsigned long)dmi->driver_data;
3209 dev_printk(KERN_INFO, host->dev,
3210 "applying extra ACPI _GTF filter 0x%x for %s\n",
3211 filter, dmi->ident);
3212
3213 for (i = 0; i < host->n_ports; i++) {
3214 struct ata_port *ap = host->ports[i];
3215 struct ata_link *link;
3216 struct ata_device *dev;
3217
3218 ata_for_each_link(link, ap, EDGE)
3219 ata_for_each_dev(dev, link, ALL)
3220 dev->gtf_filter |= filter;
3221 }
3222 }
3223 #else
3224 static inline void ahci_gtf_filter_workaround(struct ata_host *host)
3225 {}
3226 #endif
3227
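/*
 * PCI probe: apply board and BIOS quirks, save the controller's
 * initial configuration, allocate the ata_host and its ports, reset
 * and initialize the controller, then register the interrupt handler.
 */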
3228 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3229 {
3230 static int printed_version;
3231 unsigned int board_id = ent->driver_data;
3232 struct ata_port_info pi = ahci_port_info[board_id];
3233 const struct ata_port_info *ppi[] = { &pi, NULL };
3234 struct device *dev = &pdev->dev;
3235 struct ahci_host_priv *hpriv;
3236 struct ata_host *host;
3237 int n_ports, i, rc;
3238
3239 VPRINTK("ENTER\n");
3240
3241 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
3242
3243 if (!printed_version++)
3244 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
3245
3246 /* The AHCI driver can only drive the SATA ports; the PATA driver
3247 can drive them all, so if both drivers are selected make sure
3248 AHCI stays out of the way */
3249 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
3250 return -ENODEV;
3251
3252 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
3253 * At the moment, we can only use the AHCI mode. Let the users know
3254 * that for SAS drives they're out of luck.
3255 */
3256 if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
3257 dev_printk(KERN_INFO, &pdev->dev, "PDC42819 "
3258 "can only drive SATA devices with this driver\n");
3259
3260 /* acquire resources */
3261 rc = pcim_enable_device(pdev);
3262 if (rc)
3263 return rc;
3264
3265 /* AHCI controllers often implement SFF compatible interface.
3266 * Grab all PCI BARs just in case.
3267 */
3268 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
3269 if (rc == -EBUSY)
3270 pcim_pin_device(pdev);
3271 if (rc)
3272 return rc;
3273
3274 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3275 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
3276 u8 map;
3277
3278 /* ICH6s share the same PCI ID for both piix and ahci
3279 * modes. Enabling ahci mode while MAP indicates
3280 * combined mode is a bad idea. Yield to ata_piix.
3281 */
3282 pci_read_config_byte(pdev, ICH_MAP, &map);
3283 if (map & 0x3) {
3284 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
3285 "combined mode, can't enable AHCI mode\n");
3286 return -ENODEV;
3287 }
3288 }
3289
3290 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
3291 if (!hpriv)
3292 return -ENOMEM;
3293 hpriv->flags |= (unsigned long)pi.private_data;
3294
3295 /* MCP65 revision A1 and A2 can't do MSI */
3296 if (board_id == board_ahci_mcp65 &&
3297 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
3298 hpriv->flags |= AHCI_HFLAG_NO_MSI;
3299
3300 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
3301 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
3302 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
3303
3304 /* only some SB600s can do 64bit DMA */
3305 if (ahci_sb600_enable_64bit(pdev))
3306 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
3307
3308 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3309 pci_intx(pdev, 1);
3310
3311 /* save initial config */
3312 ahci_save_initial_config(pdev, hpriv);
3313
3314 /* prepare host */
3315 if (hpriv->cap & HOST_CAP_NCQ) {
3316 pi.flags |= ATA_FLAG_NCQ;
3317 /* Auto-activate optimization is supposed to be supported on
3318 all AHCI controllers indicating NCQ support, but it seems
3319 to be broken at least on some NVIDIA MCP79 chipsets.
3320 Until we get info on which NVIDIA chipsets don't have this
3321 issue, if any, disable AA on all NVIDIA AHCIs. */
3322 if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
3323 pi.flags |= ATA_FLAG_FPDMA_AA;
3324 }
3325
3326 if (hpriv->cap & HOST_CAP_PMP)
3327 pi.flags |= ATA_FLAG_PMP;
3328
3329 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
3330 u8 messages;
3331 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3332 u32 em_loc = readl(mmio + HOST_EM_LOC);
3333 u32 em_ctl = readl(mmio + HOST_EM_CTL);
3334
3335 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3336
3337 /* we only support LED message type right now */
3338 if ((messages & 0x01) && (ahci_em_messages == 1)) {
3339 /* store em_loc */
3340 hpriv->em_loc = ((em_loc >> 16) * 4);
3341 pi.flags |= ATA_FLAG_EM;
3342 if (!(em_ctl & EM_CTL_ALHD))
3343 pi.flags |= ATA_FLAG_SW_ACTIVITY;
3344 }
3345 }
3346
3347 if (ahci_broken_system_poweroff(pdev)) {
3348 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
3349 dev_info(&pdev->dev,
3350 "quirky BIOS, skipping spindown on poweroff\n");
3351 }
3352
3353 if (ahci_broken_suspend(pdev)) {
3354 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
3355 dev_printk(KERN_WARNING, &pdev->dev,
3356 "BIOS update required for suspend/resume\n");
3357 }
3358
3359 if (ahci_broken_online(pdev)) {
3360 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
3361 dev_info(&pdev->dev,
3362 "online status unreliable, applying workaround\n");
3363 }
3364
3365 /* CAP.NP sometimes indicates the index of the last enabled
3366 * port, at other times, that of the last possible port, so
3367 * determining the maximum port number requires looking at
3368 * both CAP.NP and port_map.
3369 */
3370 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
3371
3372 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3373 if (!host)
3374 return -ENOMEM;
3375 host->iomap = pcim_iomap_table(pdev);
3376 host->private_data = hpriv;
3377
3378 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
3379 host->flags |= ATA_HOST_PARALLEL_SCAN;
3380 else
3381 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3382
3383 if (pi.flags & ATA_FLAG_EM)
3384 ahci_reset_em(host);
3385
3386 for (i = 0; i < host->n_ports; i++) {
3387 struct ata_port *ap = host->ports[i];
3388
3389 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3390 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3391 0x100 + ap->port_no * 0x80, "port");
3392
3393 /* set initial link pm policy */
3394 ap->pm_policy = NOT_AVAILABLE;
3395
3396 /* set enclosure management message type */
3397 if (ap->flags & ATA_FLAG_EM)
3398 ap->em_message_type = ahci_em_messages;
3399
3400
3401 /* disabled/not-implemented port */
3402 if (!(hpriv->port_map & (1 << i)))
3403 ap->ops = &ata_dummy_port_ops;
3404 }
3405
3406 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3407 ahci_p5wdh_workaround(host);
3408
3409 /* apply gtf filter quirk */
3410 ahci_gtf_filter_workaround(host);
3411
3412 /* initialize adapter */
3413 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3414 if (rc)
3415 return rc;
3416
3417 rc = ahci_reset_controller(host);
3418 if (rc)
3419 return rc;
3420
3421 ahci_init_controller(host);
3422 ahci_print_info(host);
3423
3424 pci_set_master(pdev);
3425 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3426 &ahci_sht);
3427 }
3428
3429 static int __init ahci_init(void)
3430 {
3431 return pci_register_driver(&ahci_pci_driver);
3432 }
3433
3434 static void __exit ahci_exit(void)
3435 {
3436 pci_unregister_driver(&ahci_pci_driver);
3437 }
3438
3439
3440 MODULE_AUTHOR("Jeff Garzik");
3441 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3442 MODULE_LICENSE("GPL");
3443 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3444 MODULE_VERSION(DRV_VERSION);
3445
3446 module_init(ahci_init);
3447 module_exit(ahci_exit);