1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/sched.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/device.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "2.0"
51
52
53 enum {
54 AHCI_PCI_BAR = 5,
55 AHCI_MAX_PORTS = 32,
56 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0,
59 AHCI_MAX_CMDS = 32,
60 AHCI_CMD_SZ = 32,
61 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
62 AHCI_RX_FIS_SZ = 256,
63 AHCI_CMD_TBL_CDB = 0x40,
64 AHCI_CMD_TBL_HDR_SZ = 0x80,
65 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
66 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
67 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
68 AHCI_RX_FIS_SZ,
69 AHCI_IRQ_ON_SG = (1 << 31),
70 AHCI_CMD_ATAPI = (1 << 5),
71 AHCI_CMD_WRITE = (1 << 6),
72 AHCI_CMD_PREFETCH = (1 << 7),
73 AHCI_CMD_RESET = (1 << 8),
74 AHCI_CMD_CLR_BUSY = (1 << 10),
75
76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
77 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
78 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
79
80 board_ahci = 0,
81 board_ahci_pi = 1,
82 board_ahci_vt8251 = 2,
83 board_ahci_ign_iferr = 3,
84
85 /* global controller registers */
86 HOST_CAP = 0x00, /* host capabilities */
87 HOST_CTL = 0x04, /* global host control */
88 HOST_IRQ_STAT = 0x08, /* interrupt status */
89 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
90 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
91
92 /* HOST_CTL bits */
93 HOST_RESET = (1 << 0), /* reset controller; self-clear */
94 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
95 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
96
97 /* HOST_CAP bits */
98 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
99 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
100 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
101 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
102 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
103
104 /* registers for each SATA port */
105 PORT_LST_ADDR = 0x00, /* command list DMA addr */
106 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
107 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
108 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
109 PORT_IRQ_STAT = 0x10, /* interrupt status */
110 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
111 PORT_CMD = 0x18, /* port command */
112 PORT_TFDATA = 0x20, /* taskfile data */
113 PORT_SIG = 0x24, /* device TF signature */
114 PORT_CMD_ISSUE = 0x38, /* command issue */
115 PORT_SCR = 0x28, /* SATA phy register block */
116 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
117 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
118 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
119 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
120
121 /* PORT_IRQ_{STAT,MASK} bits */
122 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
123 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
124 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
125 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
126 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
127 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
128 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
129 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
130
131 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
132 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
133 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
134 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
135 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
136 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
137 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
138 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
139 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
140
141 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
142 PORT_IRQ_IF_ERR |
143 PORT_IRQ_CONNECT |
144 PORT_IRQ_PHYRDY |
145 PORT_IRQ_UNK_FIS,
146 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
147 PORT_IRQ_TF_ERR |
148 PORT_IRQ_HBUS_DATA_ERR,
149 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
150 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
151 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
152
153 /* PORT_CMD bits */
154 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
155 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
156 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
157 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
158 PORT_CMD_CLO = (1 << 3), /* Command list override */
159 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
160 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
161 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
162
163 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
164 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
165 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
166 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
167
168 /* ap->flags bits */
169 AHCI_FLAG_NO_NCQ = (1 << 24),
170 AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 25), /* ignore IRQ_IF_ERR */
171 AHCI_FLAG_HONOR_PI = (1 << 26), /* honor PORTS_IMPL */
172 };
173
174 struct ahci_cmd_hdr {
175 u32 opts;
176 u32 status;
177 u32 tbl_addr;
178 u32 tbl_addr_hi;
179 u32 reserved[4];
180 };
181
182 struct ahci_sg {
183 u32 addr;
184 u32 addr_hi;
185 u32 reserved;
186 u32 flags_size;
187 };
188
189 struct ahci_host_priv {
190 u32 cap; /* cache of HOST_CAP register */
191 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
192 };
193
194 struct ahci_port_priv {
195 struct ahci_cmd_hdr *cmd_slot;
196 dma_addr_t cmd_slot_dma;
197 void *cmd_tbl;
198 dma_addr_t cmd_tbl_dma;
199 void *rx_fis;
200 dma_addr_t rx_fis_dma;
201 /* for NCQ spurious interrupt analysis */
202 int ncq_saw_spurious_sdb_cnt;
203 unsigned int ncq_saw_d2h:1;
204 unsigned int ncq_saw_dmas:1;
205 };
206
207 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
208 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
209 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
210 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
211 static irqreturn_t ahci_interrupt (int irq, void *dev_instance);
212 static void ahci_irq_clear(struct ata_port *ap);
213 static int ahci_port_start(struct ata_port *ap);
214 static void ahci_port_stop(struct ata_port *ap);
215 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
216 static void ahci_qc_prep(struct ata_queued_cmd *qc);
217 static u8 ahci_check_status(struct ata_port *ap);
218 static void ahci_freeze(struct ata_port *ap);
219 static void ahci_thaw(struct ata_port *ap);
220 static void ahci_error_handler(struct ata_port *ap);
221 static void ahci_vt8251_error_handler(struct ata_port *ap);
222 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
223 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
224 static int ahci_port_resume(struct ata_port *ap);
225 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
226 static int ahci_pci_device_resume(struct pci_dev *pdev);
227
228 static struct scsi_host_template ahci_sht = {
229 .module = THIS_MODULE,
230 .name = DRV_NAME,
231 .ioctl = ata_scsi_ioctl,
232 .queuecommand = ata_scsi_queuecmd,
233 .change_queue_depth = ata_scsi_change_queue_depth,
234 .can_queue = AHCI_MAX_CMDS - 1,
235 .this_id = ATA_SHT_THIS_ID,
236 .sg_tablesize = AHCI_MAX_SG,
237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
238 .emulated = ATA_SHT_EMULATED,
239 .use_clustering = AHCI_USE_CLUSTERING,
240 .proc_name = DRV_NAME,
241 .dma_boundary = AHCI_DMA_BOUNDARY,
242 .slave_configure = ata_scsi_slave_config,
243 .slave_destroy = ata_scsi_slave_destroy,
244 .bios_param = ata_std_bios_param,
245 .suspend = ata_scsi_device_suspend,
246 .resume = ata_scsi_device_resume,
247 };
248
249 static const struct ata_port_operations ahci_ops = {
250 .port_disable = ata_port_disable,
251
252 .check_status = ahci_check_status,
253 .check_altstatus = ahci_check_status,
254 .dev_select = ata_noop_dev_select,
255
256 .tf_read = ahci_tf_read,
257
258 .qc_prep = ahci_qc_prep,
259 .qc_issue = ahci_qc_issue,
260
261 .irq_handler = ahci_interrupt,
262 .irq_clear = ahci_irq_clear,
263
264 .scr_read = ahci_scr_read,
265 .scr_write = ahci_scr_write,
266
267 .freeze = ahci_freeze,
268 .thaw = ahci_thaw,
269
270 .error_handler = ahci_error_handler,
271 .post_internal_cmd = ahci_post_internal_cmd,
272
273 .port_suspend = ahci_port_suspend,
274 .port_resume = ahci_port_resume,
275
276 .port_start = ahci_port_start,
277 .port_stop = ahci_port_stop,
278 };
279
280 static const struct ata_port_operations ahci_vt8251_ops = {
281 .port_disable = ata_port_disable,
282
283 .check_status = ahci_check_status,
284 .check_altstatus = ahci_check_status,
285 .dev_select = ata_noop_dev_select,
286
287 .tf_read = ahci_tf_read,
288
289 .qc_prep = ahci_qc_prep,
290 .qc_issue = ahci_qc_issue,
291
292 .irq_handler = ahci_interrupt,
293 .irq_clear = ahci_irq_clear,
294
295 .scr_read = ahci_scr_read,
296 .scr_write = ahci_scr_write,
297
298 .freeze = ahci_freeze,
299 .thaw = ahci_thaw,
300
301 .error_handler = ahci_vt8251_error_handler,
302 .post_internal_cmd = ahci_post_internal_cmd,
303
304 .port_suspend = ahci_port_suspend,
305 .port_resume = ahci_port_resume,
306
307 .port_start = ahci_port_start,
308 .port_stop = ahci_port_stop,
309 };
310
311 static const struct ata_port_info ahci_port_info[] = {
312 /* board_ahci */
313 {
314 .sht = &ahci_sht,
315 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
316 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
317 ATA_FLAG_SKIP_D2H_BSY,
318 .pio_mask = 0x1f, /* pio0-4 */
319 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
320 .port_ops = &ahci_ops,
321 },
322 /* board_ahci_pi */
323 {
324 .sht = &ahci_sht,
325 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
326 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
327 ATA_FLAG_SKIP_D2H_BSY | AHCI_FLAG_HONOR_PI,
328 .pio_mask = 0x1f, /* pio0-4 */
329 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
330 .port_ops = &ahci_ops,
331 },
332 /* board_ahci_vt8251 */
333 {
334 .sht = &ahci_sht,
335 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
336 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
337 ATA_FLAG_SKIP_D2H_BSY |
338 ATA_FLAG_HRST_TO_RESUME | AHCI_FLAG_NO_NCQ,
339 .pio_mask = 0x1f, /* pio0-4 */
340 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
341 .port_ops = &ahci_vt8251_ops,
342 },
343 /* board_ahci_ign_iferr */
344 {
345 .sht = &ahci_sht,
346 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
347 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
348 ATA_FLAG_SKIP_D2H_BSY |
349 AHCI_FLAG_IGN_IRQ_IF_ERR,
350 .pio_mask = 0x1f, /* pio0-4 */
351 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
352 .port_ops = &ahci_ops,
353 },
354 };
355
356 static const struct pci_device_id ahci_pci_tbl[] = {
357 /* Intel */
358 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
359 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
360 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
361 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
362 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
363 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
364 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
365 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
366 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
367 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
368 { PCI_VDEVICE(INTEL, 0x2821), board_ahci_pi }, /* ICH8 */
369 { PCI_VDEVICE(INTEL, 0x2822), board_ahci_pi }, /* ICH8 */
370 { PCI_VDEVICE(INTEL, 0x2824), board_ahci_pi }, /* ICH8 */
371 { PCI_VDEVICE(INTEL, 0x2829), board_ahci_pi }, /* ICH8M */
372 { PCI_VDEVICE(INTEL, 0x282a), board_ahci_pi }, /* ICH8M */
373 { PCI_VDEVICE(INTEL, 0x2922), board_ahci_pi }, /* ICH9 */
374 { PCI_VDEVICE(INTEL, 0x2923), board_ahci_pi }, /* ICH9 */
375 { PCI_VDEVICE(INTEL, 0x2924), board_ahci_pi }, /* ICH9 */
376 { PCI_VDEVICE(INTEL, 0x2925), board_ahci_pi }, /* ICH9 */
377 { PCI_VDEVICE(INTEL, 0x2927), board_ahci_pi }, /* ICH9 */
378 { PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */
379 { PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */
380 { PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */
381 { PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */
382 { PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */
383 { PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */
384
385 /* JMicron */
386 { PCI_VDEVICE(JMICRON, 0x2360), board_ahci_ign_iferr }, /* JMB360 */
387 { PCI_VDEVICE(JMICRON, 0x2361), board_ahci_ign_iferr }, /* JMB361 */
388 { PCI_VDEVICE(JMICRON, 0x2363), board_ahci_ign_iferr }, /* JMB363 */
389 { PCI_VDEVICE(JMICRON, 0x2365), board_ahci_ign_iferr }, /* JMB365 */
390 { PCI_VDEVICE(JMICRON, 0x2366), board_ahci_ign_iferr }, /* JMB366 */
391
392 /* ATI */
393 { PCI_VDEVICE(ATI, 0x4380), board_ahci }, /* ATI SB600 non-raid */
394 { PCI_VDEVICE(ATI, 0x4381), board_ahci }, /* ATI SB600 raid */
395
396 /* VIA */
397 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
398
399 /* NVIDIA */
400 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
401 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
402 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
403 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
404 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */
405 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */
406 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */
407 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */
408 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
409 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
410 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
411 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
412 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
413 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
414 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
415 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
416 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
417 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
418 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
419 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
420
421 /* SiS */
422 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
423 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
424 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
425
426 /* Generic, PCI class code for AHCI */
427 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
428 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
429
430 { } /* terminate list */
431 };
432
433
434 static struct pci_driver ahci_pci_driver = {
435 .name = DRV_NAME,
436 .id_table = ahci_pci_tbl,
437 .probe = ahci_init_one,
438 .remove = ata_pci_remove_one,
439 .suspend = ahci_pci_device_suspend,
440 .resume = ahci_pci_device_resume,
441 };
442
443
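/* CAP.NP (bits 4:0) is a zero-based count of the ports the HBA supports */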
444 static inline int ahci_nr_ports(u32 cap)
445 {
446 return (cap & 0x1f) + 1;
447 }
448
449 static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
450 {
451 return base + 0x100 + (port * 0x80);
452 }
453
454 static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
455 {
456 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
457 }
458
459 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
460 {
461 unsigned int sc_reg;
462
463 switch (sc_reg_in) {
464 case SCR_STATUS: sc_reg = 0; break;
465 case SCR_CONTROL: sc_reg = 1; break;
466 case SCR_ERROR: sc_reg = 2; break;
467 case SCR_ACTIVE: sc_reg = 3; break;
468 default:
469 return 0xffffffffU;
470 }
471
472 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
473 }
474
475
476 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
477 u32 val)
478 {
479 unsigned int sc_reg;
480
481 switch (sc_reg_in) {
482 case SCR_STATUS: sc_reg = 0; break;
483 case SCR_CONTROL: sc_reg = 1; break;
484 case SCR_ERROR: sc_reg = 2; break;
485 case SCR_ACTIVE: sc_reg = 3; break;
486 default:
487 return;
488 }
489
490 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
491 }
492
493 static void ahci_start_engine(void __iomem *port_mmio)
494 {
495 u32 tmp;
496
497 /* start DMA */
498 tmp = readl(port_mmio + PORT_CMD);
499 tmp |= PORT_CMD_START;
500 writel(tmp, port_mmio + PORT_CMD);
501 readl(port_mmio + PORT_CMD); /* flush */
502 }
503
504 static int ahci_stop_engine(void __iomem *port_mmio)
505 {
506 u32 tmp;
507
508 tmp = readl(port_mmio + PORT_CMD);
509
510 /* check if the HBA is idle */
511 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
512 return 0;
513
514 /* setting HBA to idle */
515 tmp &= ~PORT_CMD_START;
516 writel(tmp, port_mmio + PORT_CMD);
517
518 /* wait for engine to stop. This could be as long as 500 msec */
519 tmp = ata_wait_register(port_mmio + PORT_CMD,
520 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
521 if (tmp & PORT_CMD_LIST_ON)
522 return -EIO;
523
524 return 0;
525 }
526
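/* Program the command list and received-FIS DMA addresses (upper dwords
 * only when the HBA advertises 64-bit addressing) and enable FIS reception.
 */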
527 static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
528 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
529 {
530 u32 tmp;
531
532 /* set FIS registers */
533 if (cap & HOST_CAP_64)
534 writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
535 writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
536
537 if (cap & HOST_CAP_64)
538 writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
539 writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
540
541 /* enable FIS reception */
542 tmp = readl(port_mmio + PORT_CMD);
543 tmp |= PORT_CMD_FIS_RX;
544 writel(tmp, port_mmio + PORT_CMD);
545
546 /* flush */
547 readl(port_mmio + PORT_CMD);
548 }
549
550 static int ahci_stop_fis_rx(void __iomem *port_mmio)
551 {
552 u32 tmp;
553
554 /* disable FIS reception */
555 tmp = readl(port_mmio + PORT_CMD);
556 tmp &= ~PORT_CMD_FIS_RX;
557 writel(tmp, port_mmio + PORT_CMD);
558
559 /* wait for completion, spec says 500ms, give it 1000 */
560 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
561 PORT_CMD_FIS_ON, 10, 1000);
562 if (tmp & PORT_CMD_FIS_ON)
563 return -EBUSY;
564
565 return 0;
566 }
567
568 static void ahci_power_up(void __iomem *port_mmio, u32 cap)
569 {
570 u32 cmd;
571
572 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
573
574 /* spin up device */
575 if (cap & HOST_CAP_SSS) {
576 cmd |= PORT_CMD_SPIN_UP;
577 writel(cmd, port_mmio + PORT_CMD);
578 }
579
580 /* wake up link */
581 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
582 }
583
584 static void ahci_power_down(void __iomem *port_mmio, u32 cap)
585 {
586 u32 cmd, scontrol;
587
588 if (!(cap & HOST_CAP_SSS))
589 return;
590
591 /* put device into listen mode, first set PxSCTL.DET to 0 */
592 scontrol = readl(port_mmio + PORT_SCR_CTL);
593 scontrol &= ~0xf;
594 writel(scontrol, port_mmio + PORT_SCR_CTL);
595
596 /* then set PxCMD.SUD to 0 */
597 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
598 cmd &= ~PORT_CMD_SPIN_UP;
599 writel(cmd, port_mmio + PORT_CMD);
600 }
601
602 static void ahci_init_port(void __iomem *port_mmio, u32 cap,
603 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
604 {
605 /* enable FIS reception */
606 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
607
608 /* enable DMA */
609 ahci_start_engine(port_mmio);
610 }
611
612 static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
613 {
614 int rc;
615
616 /* disable DMA */
617 rc = ahci_stop_engine(port_mmio);
618 if (rc) {
619 *emsg = "failed to stop engine";
620 return rc;
621 }
622
623 /* disable FIS reception */
624 rc = ahci_stop_fis_rx(port_mmio);
625 if (rc) {
626 *emsg = "failed stop FIS RX";
627 return rc;
628 }
629
630 return 0;
631 }
632
633 static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
634 {
635 u32 cap_save, impl_save, tmp;
636
637 cap_save = readl(mmio + HOST_CAP);
638 impl_save = readl(mmio + HOST_PORTS_IMPL);
639
640 /* global controller reset */
641 tmp = readl(mmio + HOST_CTL);
642 if ((tmp & HOST_RESET) == 0) {
643 writel(tmp | HOST_RESET, mmio + HOST_CTL);
644 readl(mmio + HOST_CTL); /* flush */
645 }
646
647 /* reset must complete within 1 second, or
648 * the hardware should be considered fried.
649 */
650 ssleep(1);
651
652 tmp = readl(mmio + HOST_CTL);
653 if (tmp & HOST_RESET) {
654 dev_printk(KERN_ERR, &pdev->dev,
655 "controller reset failed (0x%x)\n", tmp);
656 return -EIO;
657 }
658
659 /* turn on AHCI mode */
660 writel(HOST_AHCI_EN, mmio + HOST_CTL);
661 (void) readl(mmio + HOST_CTL); /* flush */
662
663 /* These write-once registers are normally cleared on reset.
664 * Restore BIOS values... which we HOPE were present before
665 * reset.
666 */
667 if (!impl_save) {
668 impl_save = (1 << ahci_nr_ports(cap_save)) - 1;
669 dev_printk(KERN_WARNING, &pdev->dev,
670 "PORTS_IMPL is zero, forcing 0x%x\n", impl_save);
671 }
672 writel(cap_save, mmio + HOST_CAP);
673 writel(impl_save, mmio + HOST_PORTS_IMPL);
674 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
675
676 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
677 u16 tmp16;
678
679 /* configure PCS */
680 pci_read_config_word(pdev, 0x92, &tmp16);
681 tmp16 |= 0xf;
682 pci_write_config_word(pdev, 0x92, tmp16);
683 }
684
685 return 0;
686 }
687
688 static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
689 int n_ports, unsigned int port_flags,
690 struct ahci_host_priv *hpriv)
691 {
692 int i, rc;
693 u32 tmp;
694
695 for (i = 0; i < n_ports; i++) {
696 void __iomem *port_mmio = ahci_port_base(mmio, i);
697 const char *emsg = NULL;
698
699 if ((port_flags & AHCI_FLAG_HONOR_PI) &&
700 !(hpriv->port_map & (1 << i)))
701 continue;
702
703 /* make sure port is not active */
704 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
705 if (rc)
706 dev_printk(KERN_WARNING, &pdev->dev,
707 "%s (%d)\n", emsg, rc);
708
709 /* clear SError */
710 tmp = readl(port_mmio + PORT_SCR_ERR);
711 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
712 writel(tmp, port_mmio + PORT_SCR_ERR);
713
714 /* clear port IRQ */
715 tmp = readl(port_mmio + PORT_IRQ_STAT);
716 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
717 if (tmp)
718 writel(tmp, port_mmio + PORT_IRQ_STAT);
719
720 writel(1 << i, mmio + HOST_IRQ_STAT);
721 }
722
723 tmp = readl(mmio + HOST_CTL);
724 VPRINTK("HOST_CTL 0x%x\n", tmp);
725 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
726 tmp = readl(mmio + HOST_CTL);
727 VPRINTK("HOST_CTL 0x%x\n", tmp);
728 }
729
730 static unsigned int ahci_dev_classify(struct ata_port *ap)
731 {
732 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
733 struct ata_taskfile tf;
734 u32 tmp;
735
736 tmp = readl(port_mmio + PORT_SIG);
737 tf.lbah = (tmp >> 24) & 0xff;
738 tf.lbam = (tmp >> 16) & 0xff;
739 tf.lbal = (tmp >> 8) & 0xff;
740 tf.nsect = (tmp) & 0xff;
741
742 return ata_dev_classify(&tf);
743 }
744
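/* Fill command list slot @tag: the option dword packed by the caller
 * (FIS length, flags, PRD count) and the bus address of that tag's
 * command table.
 */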
745 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
746 u32 opts)
747 {
748 dma_addr_t cmd_tbl_dma;
749
750 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
751
752 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
753 pp->cmd_slot[tag].status = 0;
754 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
755 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
756 }
757
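/* Clear a stuck BSY/DRQ with a Command List Override; returns -EOPNOTSUPP
 * if the HBA doesn't advertise CAP.SCLO.
 */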
758 static int ahci_clo(struct ata_port *ap)
759 {
760 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
761 struct ahci_host_priv *hpriv = ap->host->private_data;
762 u32 tmp;
763
764 if (!(hpriv->cap & HOST_CAP_CLO))
765 return -EOPNOTSUPP;
766
767 tmp = readl(port_mmio + PORT_CMD);
768 tmp |= PORT_CMD_CLO;
769 writel(tmp, port_mmio + PORT_CMD);
770
771 tmp = ata_wait_register(port_mmio + PORT_CMD,
772 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
773 if (tmp & PORT_CMD_CLO)
774 return -EIO;
775
776 return 0;
777 }
778
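/* Software reset per AHCI 1.1 section 10.4.1: stop the DMA engine, clear
 * BSY/DRQ with a Command List Override if needed, then issue two H2D
 * Register FISes from command slot 0 (SRST set, then cleared) and
 * classify the attached device.
 */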
779 static int ahci_softreset(struct ata_port *ap, unsigned int *class)
780 {
781 struct ahci_port_priv *pp = ap->private_data;
782 void __iomem *mmio = ap->host->mmio_base;
783 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
784 const u32 cmd_fis_len = 5; /* five dwords */
785 const char *reason = NULL;
786 struct ata_taskfile tf;
787 u32 tmp;
788 u8 *fis;
789 int rc;
790
791 DPRINTK("ENTER\n");
792
793 if (ata_port_offline(ap)) {
794 DPRINTK("PHY reports no device\n");
795 *class = ATA_DEV_NONE;
796 return 0;
797 }
798
799 /* prepare for SRST (AHCI-1.1 10.4.1) */
800 rc = ahci_stop_engine(port_mmio);
801 if (rc) {
802 reason = "failed to stop engine";
803 goto fail_restart;
804 }
805
806 /* check BUSY/DRQ, perform Command List Override if necessary */
807 if (ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ)) {
808 rc = ahci_clo(ap);
809
810 if (rc == -EOPNOTSUPP) {
811 reason = "port busy but CLO unavailable";
812 goto fail_restart;
813 } else if (rc) {
814 reason = "port busy but CLO failed";
815 goto fail_restart;
816 }
817 }
818
819 /* restart engine */
820 ahci_start_engine(port_mmio);
821
822 ata_tf_init(ap->device, &tf);
823 fis = pp->cmd_tbl;
824
825 /* issue the first D2H Register FIS */
826 ahci_fill_cmd_slot(pp, 0,
827 cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
828
829 tf.ctl |= ATA_SRST;
830 ata_tf_to_fis(&tf, fis, 0);
831 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
832
833 writel(1, port_mmio + PORT_CMD_ISSUE);
834
835 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
836 if (tmp & 0x1) {
837 rc = -EIO;
838 reason = "1st FIS failed";
839 goto fail;
840 }
841
842 /* spec says at least 5us, but be generous and sleep for 1ms */
843 msleep(1);
844
845 /* issue the second D2H Register FIS */
846 ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
847
848 tf.ctl &= ~ATA_SRST;
849 ata_tf_to_fis(&tf, fis, 0);
850 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
851
852 writel(1, port_mmio + PORT_CMD_ISSUE);
853 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
854
855 /* spec mandates ">= 2ms" before checking status.
856 * We wait 150ms, because that was the magic delay used for
857 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
858 * between when the ATA command register is written and when
859 * status is checked. Because waiting for "a while" before
860 * checking status is fine, post SRST, we perform this magic
861 * delay here as well.
862 */
863 msleep(150);
864
865 *class = ATA_DEV_NONE;
866 if (ata_port_online(ap)) {
867 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
868 rc = -EIO;
869 reason = "device not ready";
870 goto fail;
871 }
872 *class = ahci_dev_classify(ap);
873 }
874
875 DPRINTK("EXIT, class=%u\n", *class);
876 return 0;
877
878 fail_restart:
879 ahci_start_engine(port_mmio);
880 fail:
881 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
882 return rc;
883 }
884
885 static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
886 {
887 struct ahci_port_priv *pp = ap->private_data;
888 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
889 struct ata_taskfile tf;
890 void __iomem *mmio = ap->host->mmio_base;
891 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
892 int rc;
893
894 DPRINTK("ENTER\n");
895
896 ahci_stop_engine(port_mmio);
897
898 /* clear D2H reception area to properly wait for D2H FIS */
899 ata_tf_init(ap->device, &tf);
900 tf.command = 0x80;
901 ata_tf_to_fis(&tf, d2h_fis, 0);
902
903 rc = sata_std_hardreset(ap, class);
904
905 ahci_start_engine(port_mmio);
906
907 if (rc == 0 && ata_port_online(ap))
908 *class = ahci_dev_classify(ap);
909 if (*class == ATA_DEV_UNKNOWN)
910 *class = ATA_DEV_NONE;
911
912 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
913 return rc;
914 }
915
916 static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class)
917 {
918 void __iomem *mmio = ap->host->mmio_base;
919 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
920 int rc;
921
922 DPRINTK("ENTER\n");
923
924 ahci_stop_engine(port_mmio);
925
926 rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context));
927
928 /* vt8251 needs SError cleared for the port to operate */
929 ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR));
930
931 ahci_start_engine(port_mmio);
932
933 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
934
935 /* vt8251 doesn't clear BSY on signature FIS reception,
936 * request follow-up softreset.
937 */
938 return rc ?: -EAGAIN;
939 }
940
941 static void ahci_postreset(struct ata_port *ap, unsigned int *class)
942 {
943 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
944 u32 new_tmp, tmp;
945
946 ata_std_postreset(ap, class);
947
948 /* Make sure port's ATAPI bit is set appropriately */
949 new_tmp = tmp = readl(port_mmio + PORT_CMD);
950 if (*class == ATA_DEV_ATAPI)
951 new_tmp |= PORT_CMD_ATAPI;
952 else
953 new_tmp &= ~PORT_CMD_ATAPI;
954 if (new_tmp != tmp) {
955 writel(new_tmp, port_mmio + PORT_CMD);
956 readl(port_mmio + PORT_CMD); /* flush */
957 }
958 }
959
960 static u8 ahci_check_status(struct ata_port *ap)
961 {
962 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
963
964 return readl(mmio + PORT_TFDATA) & 0xFF;
965 }
966
967 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
968 {
969 struct ahci_port_priv *pp = ap->private_data;
970 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
971
972 ata_tf_from_fis(d2h_fis, tf);
973 }
974
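/* Build the PRD table that follows the command FIS in the command table:
 * one ahci_sg entry per DMA-mapped segment, with flags_size holding the
 * byte count minus one.  Returns the number of entries written.
 */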
975 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
976 {
977 struct scatterlist *sg;
978 struct ahci_sg *ahci_sg;
979 unsigned int n_sg = 0;
980
981 VPRINTK("ENTER\n");
982
983 /*
984 * Next, the S/G list.
985 */
986 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
987 ata_for_each_sg(sg, qc) {
988 dma_addr_t addr = sg_dma_address(sg);
989 u32 sg_len = sg_dma_len(sg);
990
991 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
992 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
993 ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
994
995 ahci_sg++;
996 n_sg++;
997 }
998
999 return n_sg;
1000 }
1001
1002 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1003 {
1004 struct ata_port *ap = qc->ap;
1005 struct ahci_port_priv *pp = ap->private_data;
1006 int is_atapi = is_atapi_taskfile(&qc->tf);
1007 void *cmd_tbl;
1008 u32 opts;
1009 const u32 cmd_fis_len = 5; /* five dwords */
1010 unsigned int n_elem;
1011
1012 /*
1013 * Fill in command table information. First, the header,
1014 * a SATA Register - Host to Device command FIS.
1015 */
1016 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1017
1018 ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
1019 if (is_atapi) {
1020 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1021 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1022 }
1023
1024 n_elem = 0;
1025 if (qc->flags & ATA_QCFLAG_DMAMAP)
1026 n_elem = ahci_fill_sg(qc, cmd_tbl);
1027
1028 /*
1029 * Fill in command slot information.
1030 */
1031 opts = cmd_fis_len | n_elem << 16;
1032 if (qc->tf.flags & ATA_TFLAG_WRITE)
1033 opts |= AHCI_CMD_WRITE;
1034 if (is_atapi)
1035 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1036
1037 ahci_fill_cmd_slot(pp, qc->tag, opts);
1038 }
1039
1040 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1041 {
1042 struct ahci_port_priv *pp = ap->private_data;
1043 struct ata_eh_info *ehi = &ap->eh_info;
1044 unsigned int err_mask = 0, action = 0;
1045 struct ata_queued_cmd *qc;
1046 u32 serror;
1047
1048 ata_ehi_clear_desc(ehi);
1049
1050 /* AHCI needs SError cleared; otherwise, it might lock up */
1051 serror = ahci_scr_read(ap, SCR_ERROR);
1052 ahci_scr_write(ap, SCR_ERROR, serror);
1053
1054 /* analyze @irq_stat */
1055 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
1056
1057 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1058 if (ap->flags & AHCI_FLAG_IGN_IRQ_IF_ERR)
1059 irq_stat &= ~PORT_IRQ_IF_ERR;
1060
1061 if (irq_stat & PORT_IRQ_TF_ERR)
1062 err_mask |= AC_ERR_DEV;
1063
1064 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
1065 err_mask |= AC_ERR_HOST_BUS;
1066 action |= ATA_EH_SOFTRESET;
1067 }
1068
1069 if (irq_stat & PORT_IRQ_IF_ERR) {
1070 err_mask |= AC_ERR_ATA_BUS;
1071 action |= ATA_EH_SOFTRESET;
1072 ata_ehi_push_desc(ehi, ", interface fatal error");
1073 }
1074
1075 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1076 ata_ehi_hotplugged(ehi);
1077 ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
1078 "connection status changed" : "PHY RDY changed");
1079 }
1080
1081 if (irq_stat & PORT_IRQ_UNK_FIS) {
1082 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1083
1084 err_mask |= AC_ERR_HSM;
1085 action |= ATA_EH_SOFTRESET;
1086 ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
1087 unk[0], unk[1], unk[2], unk[3]);
1088 }
1089
1090 /* okay, let's hand over to EH */
1091 ehi->serror |= serror;
1092 ehi->action |= action;
1093
1094 qc = ata_qc_from_tag(ap, ap->active_tag);
1095 if (qc)
1096 qc->err_mask |= err_mask;
1097 else
1098 ehi->err_mask |= err_mask;
1099
1100 if (irq_stat & PORT_IRQ_FREEZE)
1101 ata_port_freeze(ap);
1102 else
1103 ata_port_abort(ap);
1104 }
1105
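/* Per-port interrupt service: read and acknowledge PORT_IRQ_STAT, hand
 * error bits to ahci_error_intr(), then complete commands against SActive
 * (NCQ) or PORT_CMD_ISSUE.  Anything left over is treated as a spurious
 * NCQ interrupt and logged for analysis.
 */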
1106 static void ahci_host_intr(struct ata_port *ap)
1107 {
1108 void __iomem *mmio = ap->host->mmio_base;
1109 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1110 struct ata_eh_info *ehi = &ap->eh_info;
1111 struct ahci_port_priv *pp = ap->private_data;
1112 u32 status, qc_active;
1113 int rc, known_irq = 0;
1114
1115 status = readl(port_mmio + PORT_IRQ_STAT);
1116 writel(status, port_mmio + PORT_IRQ_STAT);
1117
1118 if (unlikely(status & PORT_IRQ_ERROR)) {
1119 ahci_error_intr(ap, status);
1120 return;
1121 }
1122
1123 if (ap->sactive)
1124 qc_active = readl(port_mmio + PORT_SCR_ACT);
1125 else
1126 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
1127
1128 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
1129 if (rc > 0)
1130 return;
1131 if (rc < 0) {
1132 ehi->err_mask |= AC_ERR_HSM;
1133 ehi->action |= ATA_EH_SOFTRESET;
1134 ata_port_freeze(ap);
1135 return;
1136 }
1137
1138 /* hmmm... a spurious interrupt */
1139
1140 /* if !NCQ, ignore. No modern ATA device has a broken HSM
1141 * implementation for non-NCQ commands.
1142 */
1143 if (!ap->sactive)
1144 return;
1145
1146 if (status & PORT_IRQ_D2H_REG_FIS) {
1147 if (!pp->ncq_saw_d2h)
1148 ata_port_printk(ap, KERN_INFO,
1149 "D2H reg with I during NCQ, "
1150 "this message won't be printed again\n");
1151 pp->ncq_saw_d2h = 1;
1152 known_irq = 1;
1153 }
1154
1155 if (status & PORT_IRQ_DMAS_FIS) {
1156 if (!pp->ncq_saw_dmas)
1157 ata_port_printk(ap, KERN_INFO,
1158 "DMAS FIS during NCQ, "
1159 "this message won't be printed again\n");
1160 pp->ncq_saw_dmas = 1;
1161 known_irq = 1;
1162 }
1163
1164 if (status & PORT_IRQ_SDB_FIS &&
1165 pp->ncq_saw_spurious_sdb_cnt < 10) {
1166 /* An SDB FIS containing spurious completions might be
1167 * dangerous; we need to know more about them, so print
1168 * the details.
1169 */
1170 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
1171
1172 ata_port_printk(ap, KERN_INFO, "Spurious SDB FIS during NCQ "
1173 "issue=0x%x SAct=0x%x FIS=%08x:%08x%s\n",
1174 readl(port_mmio + PORT_CMD_ISSUE),
1175 readl(port_mmio + PORT_SCR_ACT),
1176 le32_to_cpu(f[0]), le32_to_cpu(f[1]),
1177 pp->ncq_saw_spurious_sdb_cnt < 10 ?
1178 "" : ", shutting up");
1179
1180 pp->ncq_saw_spurious_sdb_cnt++;
1181 known_irq = 1;
1182 }
1183
1184 if (!known_irq)
1185 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
1186 "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n",
1187 status, ap->active_tag, ap->sactive);
1188 }
1189
1190 static void ahci_irq_clear(struct ata_port *ap)
1191 {
1192 /* TODO */
1193 }
1194
1195 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1196 {
1197 struct ata_host *host = dev_instance;
1198 struct ahci_host_priv *hpriv;
1199 unsigned int i, handled = 0;
1200 void __iomem *mmio;
1201 u32 irq_stat, irq_ack = 0;
1202
1203 VPRINTK("ENTER\n");
1204
1205 hpriv = host->private_data;
1206 mmio = host->mmio_base;
1207
1208 /* sigh. 0xffffffff is a valid return from h/w */
1209 irq_stat = readl(mmio + HOST_IRQ_STAT);
1210 irq_stat &= hpriv->port_map;
1211 if (!irq_stat)
1212 return IRQ_NONE;
1213
1214 spin_lock(&host->lock);
1215
1216 for (i = 0; i < host->n_ports; i++) {
1217 struct ata_port *ap;
1218
1219 if (!(irq_stat & (1 << i)))
1220 continue;
1221
1222 ap = host->ports[i];
1223 if (ap) {
1224 ahci_host_intr(ap);
1225 VPRINTK("port %u\n", i);
1226 } else {
1227 VPRINTK("port %u (no irq)\n", i);
1228 if (ata_ratelimit())
1229 dev_printk(KERN_WARNING, host->dev,
1230 "interrupt on disabled port %u\n", i);
1231 }
1232
1233 irq_ack |= (1 << i);
1234 }
1235
1236 if (irq_ack) {
1237 writel(irq_ack, mmio + HOST_IRQ_STAT);
1238 handled = 1;
1239 }
1240
1241 spin_unlock(&host->lock);
1242
1243 VPRINTK("EXIT\n");
1244
1245 return IRQ_RETVAL(handled);
1246 }
1247
1248 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1249 {
1250 struct ata_port *ap = qc->ap;
1251 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
1252
1253 if (qc->tf.protocol == ATA_PROT_NCQ)
1254 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1255 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1256 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1257
1258 return 0;
1259 }
1260
1261 static void ahci_freeze(struct ata_port *ap)
1262 {
1263 void __iomem *mmio = ap->host->mmio_base;
1264 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1265
1266 /* turn IRQ off */
1267 writel(0, port_mmio + PORT_IRQ_MASK);
1268 }
1269
1270 static void ahci_thaw(struct ata_port *ap)
1271 {
1272 void __iomem *mmio = ap->host->mmio_base;
1273 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1274 u32 tmp;
1275
1276 /* clear IRQ */
1277 tmp = readl(port_mmio + PORT_IRQ_STAT);
1278 writel(tmp, port_mmio + PORT_IRQ_STAT);
1279 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
1280
1281 /* turn IRQ back on */
1282 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
1283 }
1284
1285 static void ahci_error_handler(struct ata_port *ap)
1286 {
1287 void __iomem *mmio = ap->host->mmio_base;
1288 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1289
1290 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1291 /* restart engine */
1292 ahci_stop_engine(port_mmio);
1293 ahci_start_engine(port_mmio);
1294 }
1295
1296 /* perform recovery */
1297 ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_hardreset,
1298 ahci_postreset);
1299 }
1300
1301 static void ahci_vt8251_error_handler(struct ata_port *ap)
1302 {
1303 void __iomem *mmio = ap->host->mmio_base;
1304 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1305
1306 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1307 /* restart engine */
1308 ahci_stop_engine(port_mmio);
1309 ahci_start_engine(port_mmio);
1310 }
1311
1312 /* perform recovery */
1313 ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
1314 ahci_postreset);
1315 }
1316
1317 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1318 {
1319 struct ata_port *ap = qc->ap;
1320 void __iomem *mmio = ap->host->mmio_base;
1321 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1322
1323 if (qc->flags & ATA_QCFLAG_FAILED)
1324 qc->err_mask |= AC_ERR_OTHER;
1325
1326 if (qc->err_mask) {
1327 /* make DMA engine forget about the failed command */
1328 ahci_stop_engine(port_mmio);
1329 ahci_start_engine(port_mmio);
1330 }
1331 }
1332
1333 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
1334 {
1335 struct ahci_host_priv *hpriv = ap->host->private_data;
1336 struct ahci_port_priv *pp = ap->private_data;
1337 void __iomem *mmio = ap->host->mmio_base;
1338 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1339 const char *emsg = NULL;
1340 int rc;
1341
1342 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1343 if (rc == 0)
1344 ahci_power_down(port_mmio, hpriv->cap);
1345 else {
1346 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
1347 ahci_init_port(port_mmio, hpriv->cap,
1348 pp->cmd_slot_dma, pp->rx_fis_dma);
1349 }
1350
1351 return rc;
1352 }
1353
1354 static int ahci_port_resume(struct ata_port *ap)
1355 {
1356 struct ahci_port_priv *pp = ap->private_data;
1357 struct ahci_host_priv *hpriv = ap->host->private_data;
1358 void __iomem *mmio = ap->host->mmio_base;
1359 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1360
1361 ahci_power_up(port_mmio, hpriv->cap);
1362 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1363
1364 return 0;
1365 }
1366
1367 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1368 {
1369 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1370 void __iomem *mmio = host->mmio_base;
1371 u32 ctl;
1372
1373 if (mesg.event == PM_EVENT_SUSPEND) {
1374 /* AHCI spec rev1.1 section 8.3.3:
1375 * Software must disable interrupts prior to requesting a
1376 * transition of the HBA to D3 state.
1377 */
1378 ctl = readl(mmio + HOST_CTL);
1379 ctl &= ~HOST_IRQ_EN;
1380 writel(ctl, mmio + HOST_CTL);
1381 readl(mmio + HOST_CTL); /* flush */
1382 }
1383
1384 return ata_pci_device_suspend(pdev, mesg);
1385 }
1386
1387 static int ahci_pci_device_resume(struct pci_dev *pdev)
1388 {
1389 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1390 struct ahci_host_priv *hpriv = host->private_data;
1391 void __iomem *mmio = host->mmio_base;
1392 int rc;
1393
1394 rc = ata_pci_device_do_resume(pdev);
1395 if (rc)
1396 return rc;
1397
1398 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1399 rc = ahci_reset_controller(mmio, pdev);
1400 if (rc)
1401 return rc;
1402
1403 ahci_init_controller(mmio, pdev, host->n_ports,
1404 host->ports[0]->flags, hpriv);
1405 }
1406
1407 ata_host_resume(host);
1408
1409 return 0;
1410 }
1411
1412 static int ahci_port_start(struct ata_port *ap)
1413 {
1414 struct device *dev = ap->host->dev;
1415 struct ahci_host_priv *hpriv = ap->host->private_data;
1416 struct ahci_port_priv *pp;
1417 void __iomem *mmio = ap->host->mmio_base;
1418 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1419 void *mem;
1420 dma_addr_t mem_dma;
1421 int rc;
1422
1423 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1424 if (!pp)
1425 return -ENOMEM;
1426
1427 rc = ata_pad_alloc(ap, dev);
1428 if (rc)
1429 return rc;
1430
1431 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
1432 GFP_KERNEL);
1433 if (!mem)
1434 return -ENOMEM;
1435 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
1436
1437 /*
1438 * First item in chunk of DMA memory: 32-slot command table,
1439 * 32 bytes each in size
1440 */
1441 pp->cmd_slot = mem;
1442 pp->cmd_slot_dma = mem_dma;
1443
1444 mem += AHCI_CMD_SLOT_SZ;
1445 mem_dma += AHCI_CMD_SLOT_SZ;
1446
1447 /*
1448 * Second item: Received-FIS area
1449 */
1450 pp->rx_fis = mem;
1451 pp->rx_fis_dma = mem_dma;
1452
1453 mem += AHCI_RX_FIS_SZ;
1454 mem_dma += AHCI_RX_FIS_SZ;
1455
1456 /*
1457 * Third item: data area for storing a single command
1458 * and its scatter-gather table
1459 */
1460 pp->cmd_tbl = mem;
1461 pp->cmd_tbl_dma = mem_dma;
1462
1463 ap->private_data = pp;
1464
1465 /* power up port */
1466 ahci_power_up(port_mmio, hpriv->cap);
1467
1468 /* initialize port */
1469 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1470
1471 return 0;
1472 }
1473
1474 static void ahci_port_stop(struct ata_port *ap)
1475 {
1476 struct ahci_host_priv *hpriv = ap->host->private_data;
1477 void __iomem *mmio = ap->host->mmio_base;
1478 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1479 const char *emsg = NULL;
1480 int rc;
1481
1482 /* de-initialize port */
1483 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1484 if (rc)
1485 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
1486 }
1487
1488 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
1489 unsigned int port_idx)
1490 {
1491 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1492 base = ahci_port_base_ul(base, port_idx);
1493 VPRINTK("base now==0x%lx\n", base);
1494
1495 port->cmd_addr = base;
1496 port->scr_addr = base + PORT_SCR;
1497
1498 VPRINTK("EXIT\n");
1499 }
1500
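/* Reset and bring up the controller: cache CAP and PORTS_IMPL, reconcile
 * the advertised port count with the implemented-port map when
 * AHCI_FLAG_HONOR_PI is set, choose 64- or 32-bit DMA masks, set up each
 * port's MMIO addresses, and program the controller.
 */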
1501 static int ahci_host_init(struct ata_probe_ent *probe_ent)
1502 {
1503 struct ahci_host_priv *hpriv = probe_ent->private_data;
1504 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1505 void __iomem *mmio = probe_ent->mmio_base;
1506 unsigned int i, cap_n_ports, using_dac;
1507 int rc;
1508
1509 rc = ahci_reset_controller(mmio, pdev);
1510 if (rc)
1511 return rc;
1512
1513 hpriv->cap = readl(mmio + HOST_CAP);
1514 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
1515 cap_n_ports = ahci_nr_ports(hpriv->cap);
1516
1517 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1518 hpriv->cap, hpriv->port_map, cap_n_ports);
1519
1520 if (probe_ent->port_flags & AHCI_FLAG_HONOR_PI) {
1521 unsigned int n_ports = cap_n_ports;
1522 u32 port_map = hpriv->port_map;
1523 int max_port = 0;
1524
1525 for (i = 0; i < AHCI_MAX_PORTS && n_ports; i++) {
1526 if (port_map & (1 << i)) {
1527 n_ports--;
1528 port_map &= ~(1 << i);
1529 max_port = i;
1530 } else
1531 probe_ent->dummy_port_mask |= 1 << i;
1532 }
1533
1534 if (n_ports || port_map)
1535 dev_printk(KERN_WARNING, &pdev->dev,
1536 "nr_ports (%u) and implemented port map "
1537 "(0x%x) don't match\n",
1538 cap_n_ports, hpriv->port_map);
1539
1540 probe_ent->n_ports = max_port + 1;
1541 } else
1542 probe_ent->n_ports = cap_n_ports;
1543
1544 using_dac = hpriv->cap & HOST_CAP_64;
1545 if (using_dac &&
1546 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1547 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1548 if (rc) {
1549 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1550 if (rc) {
1551 dev_printk(KERN_ERR, &pdev->dev,
1552 "64-bit DMA enable failed\n");
1553 return rc;
1554 }
1555 }
1556 } else {
1557 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1558 if (rc) {
1559 dev_printk(KERN_ERR, &pdev->dev,
1560 "32-bit DMA enable failed\n");
1561 return rc;
1562 }
1563 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1564 if (rc) {
1565 dev_printk(KERN_ERR, &pdev->dev,
1566 "32-bit consistent DMA enable failed\n");
1567 return rc;
1568 }
1569 }
1570
1571 for (i = 0; i < probe_ent->n_ports; i++)
1572 ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
1573
1574 ahci_init_controller(mmio, pdev, probe_ent->n_ports,
1575 probe_ent->port_flags, hpriv);
1576
1577 pci_set_master(pdev);
1578
1579 return 0;
1580 }
1581
1582 static void ahci_print_info(struct ata_probe_ent *probe_ent)
1583 {
1584 struct ahci_host_priv *hpriv = probe_ent->private_data;
1585 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1586 void __iomem *mmio = probe_ent->mmio_base;
1587 u32 vers, cap, impl, speed;
1588 const char *speed_s;
1589 u16 cc;
1590 const char *scc_s;
1591
1592 vers = readl(mmio + HOST_VERSION);
1593 cap = hpriv->cap;
1594 impl = hpriv->port_map;
1595
1596 speed = (cap >> 20) & 0xf;
1597 if (speed == 1)
1598 speed_s = "1.5";
1599 else if (speed == 2)
1600 speed_s = "3";
1601 else
1602 speed_s = "?";
1603
1604 pci_read_config_word(pdev, 0x0a, &cc);
1605 if (cc == PCI_CLASS_STORAGE_IDE)
1606 scc_s = "IDE";
1607 else if (cc == PCI_CLASS_STORAGE_SATA)
1608 scc_s = "SATA";
1609 else if (cc == PCI_CLASS_STORAGE_RAID)
1610 scc_s = "RAID";
1611 else
1612 scc_s = "unknown";
1613
1614 dev_printk(KERN_INFO, &pdev->dev,
1615 "AHCI %02x%02x.%02x%02x "
1616 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1617 ,
1618
1619 (vers >> 24) & 0xff,
1620 (vers >> 16) & 0xff,
1621 (vers >> 8) & 0xff,
1622 vers & 0xff,
1623
1624 ((cap >> 8) & 0x1f) + 1,
1625 (cap & 0x1f) + 1,
1626 speed_s,
1627 impl,
1628 scc_s);
1629
1630 dev_printk(KERN_INFO, &pdev->dev,
1631 "flags: "
1632 "%s%s%s%s%s%s"
1633 "%s%s%s%s%s%s%s\n"
1634 ,
1635
1636 cap & (1 << 31) ? "64bit " : "",
1637 cap & (1 << 30) ? "ncq " : "",
1638 cap & (1 << 28) ? "ilck " : "",
1639 cap & (1 << 27) ? "stag " : "",
1640 cap & (1 << 26) ? "pm " : "",
1641 cap & (1 << 25) ? "led " : "",
1642
1643 cap & (1 << 24) ? "clo " : "",
1644 cap & (1 << 19) ? "nz " : "",
1645 cap & (1 << 18) ? "only " : "",
1646 cap & (1 << 17) ? "pmp " : "",
1647 cap & (1 << 15) ? "pio " : "",
1648 cap & (1 << 14) ? "slum " : "",
1649 cap & (1 << 13) ? "part " : ""
1650 );
1651 }
1652
1653 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1654 {
1655 static int printed_version;
1656 unsigned int board_idx = (unsigned int) ent->driver_data;
1657 struct device *dev = &pdev->dev;
1658 struct ata_probe_ent *probe_ent;
1659 struct ahci_host_priv *hpriv;
1660 unsigned long base;
1661 void __iomem *mmio_base;
1662 int rc;
1663
1664 VPRINTK("ENTER\n");
1665
1666 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
1667
1668 if (!printed_version++)
1669 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1670
1671 if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
1672 /* Function 1 is the PATA controller except on the 368, where
1673 we are not AHCI anyway */
1674 if (PCI_FUNC(pdev->devfn))
1675 return -ENODEV;
1676 }
1677
1678 rc = pcim_enable_device(pdev);
1679 if (rc)
1680 return rc;
1681
1682 rc = pci_request_regions(pdev, DRV_NAME);
1683 if (rc) {
1684 pcim_pin_device(pdev);
1685 return rc;
1686 }
1687
1688 if (pci_enable_msi(pdev))
1689 pci_intx(pdev, 1);
1690
1691 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
1692 if (probe_ent == NULL)
1693 return -ENOMEM;
1694
1695 probe_ent->dev = pci_dev_to_dev(pdev);
1696 INIT_LIST_HEAD(&probe_ent->node);
1697
1698 mmio_base = pcim_iomap(pdev, AHCI_PCI_BAR, 0);
1699 if (mmio_base == NULL)
1700 return -ENOMEM;
1701 base = (unsigned long) mmio_base;
1702
1703 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1704 if (!hpriv)
1705 return -ENOMEM;
1706
1707 probe_ent->sht = ahci_port_info[board_idx].sht;
1708 probe_ent->port_flags = ahci_port_info[board_idx].flags;
1709 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
1710 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
1711 probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
1712
1713 probe_ent->irq = pdev->irq;
1714 probe_ent->irq_flags = IRQF_SHARED;
1715 probe_ent->mmio_base = mmio_base;
1716 probe_ent->private_data = hpriv;
1717
1718 /* initialize adapter */
1719 rc = ahci_host_init(probe_ent);
1720 if (rc)
1721 return rc;
1722
1723 if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
1724 (hpriv->cap & HOST_CAP_NCQ))
1725 probe_ent->port_flags |= ATA_FLAG_NCQ;
1726
1727 ahci_print_info(probe_ent);
1728
1729 if (!ata_device_add(probe_ent))
1730 return -ENODEV;
1731
1732 devm_kfree(dev, probe_ent);
1733 return 0;
1734 }
1735
1736 static int __init ahci_init(void)
1737 {
1738 return pci_register_driver(&ahci_pci_driver);
1739 }
1740
1741 static void __exit ahci_exit(void)
1742 {
1743 pci_unregister_driver(&ahci_pci_driver);
1744 }
1745
1746
1747 MODULE_AUTHOR("Jeff Garzik");
1748 MODULE_DESCRIPTION("AHCI SATA low-level driver");
1749 MODULE_LICENSE("GPL");
1750 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1751 MODULE_VERSION(DRV_VERSION);
1752
1753 module_init(ahci_init);
1754 module_exit(ahci_exit);