/*
 * sata_nv.c - NVIDIA nForce SATA
 *
 * Copyright 2004 NVIDIA Corp.  All rights reserved.
 * Copyright 2004 Andrew Chew
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * No hardware documentation available outside of NVIDIA.
 * This driver programs the NVIDIA SATA controller in a similar
 * fashion as with other PCI IDE BMDMA controllers, with a few
 * NV-specific details such as register offsets, SATA phy location,
 * hotplug info, etc.
 *
 * CK804/MCP04 controllers support an alternate programming interface
 * similar to the ADMA specification (with some modifications).
 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 * sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
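
/*
 * The NF2/CK804 interrupt status registers pack one group of the NV_INT_*
 * bits per port, 4 bits each (NV_INT_PORT_SHIFT); MCP55 uses the same
 * layout with 16-bit groups (NV_INT_PORT_SHIFT_MCP55).  Port N's bits are
 * extracted as (irq_stat >> (N * NV_INT_PORT_SHIFT)) & NV_INT_ALL.
 */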

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
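
/* As the byte offsets above show, one CPB is exactly NV_ADMA_CPB_SZ (128)
   bytes; each of the NV_ADMA_MAX_CPBS tags gets one CPB plus an external
   APRD table of NV_ADMA_SGTBL_SZ bytes within the NV_ADMA_PORT_PRIV_DMA_SZ
   DMA allocation set up in nv_adma_port_start(). */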

struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
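
/* defer_bits mirrors the queue contents as a bitmask of deferred tags;
   head and tail index tag[] as a circular FIFO of commands held back for
   later issue. */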

enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;

	unsigned int	last_issue_tag;

	/* FIFO circular queue to store deferred commands */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
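
/* The ADMA general control register carries an interrupt-pending flag for
   each port at bit 19 + 12 * port; nv_adma_interrupt() uses this macro to
   test it. */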

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template nv_swncq_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= ATA_MAX_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static const struct ata_port_operations nv_swncq_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,
	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
	},
	/* SWNCQ */
	{
		.sht		= &nv_swncq_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.irq_handler	= nv_swncq_interrupt,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
static int swncq_enabled;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
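
/* The two helpers above implement the mode-switch handshake: set or clear
   NV_ADMA_CTL_GO, then poll NV_ADMA_STAT (up to 20 iterations of 50ns)
   until the IDLE/LEGACY bits reflect the new mode. */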

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping. If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke. If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
824
2dec7555
RH
825static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
826{
827 struct nv_adma_port_priv *pp = qc->ap->private_data;
828 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
829}
830
f2fb344b
RH
831static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
832{
3f3debdb
RH
833 /* Other than when internal or pass-through commands are executed,
834 the only time this function will be called in ADMA mode will be
835 if a command fails. In the failure case we don't care about going
836 into register mode with ADMA commands pending, as the commands will
837 all shortly be aborted anyway. We assume that NCQ commands are not
838 issued via passthrough, which is the only way that switching into
839 ADMA mode could abort outstanding commands. */
f2fb344b
RH
840 nv_adma_register_mode(ap);
841
842 ata_tf_read(ap, tf);
843}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
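
/* Each CPB taskfile entry packs a shadow register index into the high byte
   and the value to write into the low byte, with nv_adma_regbits control
   flags (WNB, CMDEND, IGN) OR'd in; unused slots of the fixed 12-entry
   array are marked IGN. */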

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}
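
/* A nonzero return from nv_adma_check_cpb() means EH has been invoked
   (port frozen or aborted); the interrupt handler uses it to stop scanning
   the remaining notifier bits for that port. */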

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR |
				      NV_ADMA_STAT_CMD_COMPLETE)) {
				u32 check_commands = notifier_clears[i];
				int pos, error = 0;

				if (status & NV_ADMA_STAT_CPBERR) {
					/* Check all active commands */
					if (ata_tag_valid(ap->link.active_tag))
						check_commands = 1 << ap->link.active_tag;
					else
						check_commands = ap->link.sactive;
				}

				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}
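
/* The last APRD of a command is tagged NV_APRD_END and every other entry
   NV_APRD_CONT, except index 4, the final in-CPB slot, where the chain
   instead continues through the CPB's next_aprd pointer (see
   nv_adma_fill_sg() below). */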

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len = 3;
	cpb->tag = qc->tag;
	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1605
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

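/*
 * The freeze/thaw pairs follow one pattern on all flavors: freeze
 * masks the port's bits out of the interrupt enable register; thaw
 * first acks any stale bits in the status register, then unmasks the
 * port.  Only the register width and per-port shift differ between
 * NF2, CK804 and MCP55.
 */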
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

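/*
 * MCP55 variants: same pattern, but the status/enable registers are
 * 32 bits wide with a wider per-port field (NV_INT_PORT_SHIFT_MCP55),
 * and freeze/thaw additionally chain to the stock BMDMA helpers.
 */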
static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_bmdma_freeze(ap);
}

static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_bmdma_thaw(ap);
}

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(link, &dummy, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

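/*
 * SWNCQ defer queue: a small ring of ATA_MAX_QUEUE tag slots indexed
 * by free-running head/tail counters masked with (ATA_MAX_QUEUE - 1),
 * which relies on ATA_MAX_QUEUE being a power of two.  defer_bits
 * mirrors the set of queued tags as a bitmap for quick "anything
 * deferred?" tests.
 */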
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}

static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* empty queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}

static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}

static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}

static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}

static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}

static void nv_swncq_ncq_stop(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	unsigned int i;
	u32 sactive;
	u32 done_mask;

	ata_port_printk(ap, KERN_ERR,
			"EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
			ap->qc_active, ap->link.sactive);
	ata_port_printk(ap, KERN_ERR,
		"SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n"
		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
			ap->ops->check_status(ap),
			ioread8(ap->ioaddr.error_addr));

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		u8 err = 0;
		if (pp->qc_active & (1 << i))
			err = 0;
		else if (done_mask & (1 << i))
			err = 1;
		else
			continue;

		ata_port_printk(ap, KERN_ERR,
				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
				(pp->dhfis_bits >> i) & 0x1,
				(pp->dmafis_bits >> i) & 0x1,
				(pp->sdbfis_bits >> i) & 0x1,
				(sactive >> i) & 0x1,
				(err ? "error! tag doesn't exist" : " "));
	}

	nv_swncq_pp_reinit(ap);
	ap->ops->irq_clear(ap);
	__ata_bmdma_stop(ap);
	nv_swncq_irq_clear(ap, 0xffff);
}

static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}

static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
#endif

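/*
 * Host-level SWNCQ setup: clear the "ECO 398" bit in PCI config
 * register 0x7f, flip the SWNCQ enable bits for both ports in the
 * MCP55 control register, and unmask the interrupt sources the driver
 * handles (the same 0x00fd00fd mask that nv_swncq_port_resume() above
 * writes).
 */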
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}

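/*
 * Per the checks below, Maxtor drives misbehave with SWNCQ on MCP51,
 * and on MCP55 revisions up to A2, so queueing is disabled for them at
 * slave-configure time by forcing the queue depth to 1.
 */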
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}

static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}

static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
	if (qc->tf.protocol != ATA_PROT_NCQ) {
		ata_qc_prep(qc);
		return;
	}

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	nv_swncq_fill_sg(qc);
}

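/*
 * Build the per-tag PRD table.  BMDMA PRD entries may not cross a 64KB
 * boundary, so each scatterlist segment is split at 64KB below; note
 * that a length field of 0 (len & 0xffff when len == 0x10000)
 * conventionally encodes a full 64KB in the BMDMA PRD format.
 */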
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
	ap->ops->exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

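/*
 * NCQ issue is serialized in software: a command goes to the hardware
 * immediately only when nothing is in flight; otherwise it is parked
 * on the defer queue and issued later from the interrupt path.
 */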
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_NCQ)
		return ata_qc_issue_prot(qc);

	DPRINTK("Enter\n");

	if (!pp->qc_active)
		nv_swncq_issue_atacmd(ap, qc);
	else
		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

	return 0;
}

static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @fis */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}

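/*
 * SDB FIS handling: completed tags are found by XOR-ing the tags the
 * driver believes are active (pp->qc_active) against the hardware's
 * SActive register.  A bit set in both done_mask and sactive would
 * mean the hardware reports a tag active that the driver never
 * issued, which is flagged as an HSM violation below.
 */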
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_SOFTRESET;
		return -EINVAL;
	}

	ap->ops->irq_clear(ap);
	__ata_bmdma_stop(ap);

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	if (unlikely(done_mask & sactive)) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_HARDRESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	if (!ap->qc_active) {
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device-to-host register FIS,
		 * the driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x, "
		"SWNCQ: qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferred queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}

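/*
 * The hardware reports which tag the current DMA-setup FIS belongs to
 * in the per-port tag register; the tag value sits in bits 6:2 of the
 * byte read from pp->tag_block, hence the shift and 5-bit mask below.
 */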
static inline u32 nv_swncq_tag(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	u32 tag;

	tag = readb(pp->tag_block) >> 2;
	return (tag & 0x1f);
}

static int nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return 0;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	return 1;
}

static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	ata_stat = ap->ops->check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(ap, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_SOFTRESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* On a backout interrupt, the driver must reissue
		 * the command some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_HARDRESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for the requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}

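/*
 * Top-level SWNCQ IRQ handler: the MCP55 status register packs both
 * ports into one 32-bit word (the word is shifted by
 * NV_INT_PORT_SHIFT_MCP55 between ports and each port's field is
 * handled as a u16).  Ports with NCQ commands in flight take the
 * SWNCQ path; idle ports fall back to the plain host interrupt
 * handler after clearing everything except the hotplug bits.
 */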
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			if (ap->link.sactive) {
				nv_swncq_host_interrupt(ap, (u16)irq_stat);
				handled = 1;
			} else {
				if (irq_stat)	/* preserve hotplug bits */
					nv_swncq_irq_clear(ap, 0xfff0);

				handled += nv_host_intr(ap, (u8)irq_stat);
			}
		}
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	/* Make sure this is a SATA controller by counting the number of
	 * bars (NVIDIA SATA controllers will always have six bars).
	 * Otherwise, it's an IDE controller and we ignore it.
	 */
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	if (type == SWNCQ) {
		if (swncq_enabled)
			dev_printk(KERN_NOTICE, &pdev->dev,
				   "Using SWNCQ mode\n");
		else
			type = GENERIC;
	}

	ppi[0] = &nv_port_info[type];
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
}

#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");