]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/ata/sata_nv.c
sata_nv: cleanup ADMA error handling
[mirror_ubuntu-artful-kernel.git] / drivers / ata / sata_nv.c
CommitLineData
1da177e4
LT
1/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
aa7e16d6
JG
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
1da177e4 21 *
af36d7f0
JG
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
fbbb262d
RH
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
1da177e4
LT
37 */
38
1da177e4
LT
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
a9524a76 46#include <linux/device.h>
1da177e4 47#include <scsi/scsi_host.h>
fbbb262d 48#include <scsi/scsi_device.h>
1da177e4
LT
49#include <linux/libata.h>
50
51#define DRV_NAME "sata_nv"
cdf56bcf 52#define DRV_VERSION "3.3"
fbbb262d
RH
53
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
1da177e4 55
10ad05df 56enum {
0d5ff566
TH
57 NV_MMIO_BAR = 5,
58
10ad05df
JG
59 NV_PORTS = 2,
60 NV_PIO_MASK = 0x1f,
61 NV_MWDMA_MASK = 0x07,
62 NV_UDMA_MASK = 0x7f,
63 NV_PORT0_SCR_REG_OFFSET = 0x00,
64 NV_PORT1_SCR_REG_OFFSET = 0x40,
1da177e4 65
27e4b274 66 /* INT_STATUS/ENABLE */
10ad05df 67 NV_INT_STATUS = 0x10,
10ad05df 68 NV_INT_ENABLE = 0x11,
27e4b274 69 NV_INT_STATUS_CK804 = 0x440,
10ad05df 70 NV_INT_ENABLE_CK804 = 0x441,
1da177e4 71
27e4b274
TH
72 /* INT_STATUS/ENABLE bits */
73 NV_INT_DEV = 0x01,
74 NV_INT_PM = 0x02,
75 NV_INT_ADDED = 0x04,
76 NV_INT_REMOVED = 0x08,
77
78 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
79
39f87582 80 NV_INT_ALL = 0x0f,
5a44efff
TH
81 NV_INT_MASK = NV_INT_DEV |
82 NV_INT_ADDED | NV_INT_REMOVED,
39f87582 83
27e4b274 84 /* INT_CONFIG */
10ad05df
JG
85 NV_INT_CONFIG = 0x12,
86 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
1da177e4 87
10ad05df
JG
88 // For PCI config register 20
89 NV_MCP_SATA_CFG_20 = 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
fbbb262d
RH
91 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
95
96 NV_ADMA_MAX_CPBS = 32,
97 NV_ADMA_CPB_SZ = 128,
98 NV_ADMA_APRD_SZ = 16,
99 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
100 NV_ADMA_APRD_SZ,
101 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
102 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
104 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105
106 /* BAR5 offset to ADMA general registers */
107 NV_ADMA_GEN = 0x400,
108 NV_ADMA_GEN_CTL = 0x00,
109 NV_ADMA_NOTIFIER_CLEAR = 0x30,
110
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT = 0x480,
113
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE = 0x100,
116
117 /* ADMA port registers */
118 NV_ADMA_CTL = 0x40,
119 NV_ADMA_CPB_COUNT = 0x42,
120 NV_ADMA_NEXT_CPB_IDX = 0x43,
121 NV_ADMA_STAT = 0x44,
122 NV_ADMA_CPB_BASE_LOW = 0x48,
123 NV_ADMA_CPB_BASE_HIGH = 0x4C,
124 NV_ADMA_APPEND = 0x50,
125 NV_ADMA_NOTIFIER = 0x68,
126 NV_ADMA_NOTIFIER_ERROR = 0x6C,
127
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
131 NV_ADMA_CTL_GO = (1 << 7),
132 NV_ADMA_CTL_AIEN = (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
135
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE = (1 << 0),
138 NV_CPB_RESP_ATA_ERR = (1 << 3),
139 NV_CPB_RESP_CMD_ERR = (1 << 4),
140 NV_CPB_RESP_CPB_ERR = (1 << 7),
141
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID = (1 << 0),
144 NV_CPB_CTL_QUEUE = (1 << 1),
145 NV_CPB_CTL_APRD_VALID = (1 << 2),
146 NV_CPB_CTL_IEN = (1 << 3),
147 NV_CPB_CTL_FPDMA = (1 << 4),
148
149 /* APRD flags */
150 NV_APRD_WRITE = (1 << 1),
151 NV_APRD_END = (1 << 2),
152 NV_APRD_CONT = (1 << 3),
153
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT = (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
157 NV_ADMA_STAT_HOTPLUG = (1 << 2),
158 NV_ADMA_STAT_CPBERR = (1 << 4),
159 NV_ADMA_STAT_SERROR = (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
161 NV_ADMA_STAT_IDLE = (1 << 8),
162 NV_ADMA_STAT_LEGACY = (1 << 9),
163 NV_ADMA_STAT_STOPPED = (1 << 10),
164 NV_ADMA_STAT_DONE = (1 << 12),
165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
166 NV_ADMA_STAT_TIMEOUT,
167
168 /* port flags */
169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
2dec7555 170 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
fbbb262d
RH
171
172};
173
174/* ADMA Physical Region Descriptor - one SG segment.  Hardware layout:
      16 bytes per entry (NV_ADMA_APRD_SZ). */
175struct nv_adma_prd {
176 __le64 addr;		/* bus address of the data segment */
177 __le32 len;		/* segment length - presumably bytes; TODO confirm vs HW doc */
178 u8 flags;		/* presumably NV_APRD_* (WRITE/END/CONT) - filled outside this chunk */
179 u8 packet_len;	/* NOTE(review): semantics not evident from this file */
180 __le16 reserved;
181};
182
/* Per-entry control bits OR'ed into the CPB taskfile words built by
   nv_adma_tf_to_cpb(); the high byte of each word selects the register. */
183enum nv_adma_regbits {
184 CMDEND = (1 << 15), /* end of command list */
185 WNB = (1 << 14), /* wait-not-BSY */
186 IGN = (1 << 13), /* ignore this entry */
187 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
188 DA2 = (1 << (2 + 8)),
189 DA1 = (1 << (1 + 8)),
190 DA0 = (1 << (0 + 8)),
191};
192
193/* ADMA Command Parameter Block
194 The first 5 SG segments are stored inside the Command Parameter Block itself.
195 If there are more than 5 segments the remainder are stored in a separate
196 memory area indicated by next_aprd.  Trailing comments give byte offsets;
   total size is 128 bytes (NV_ADMA_CPB_SZ). */
197struct nv_adma_cpb {
198 u8 resp_flags; /* 0: NV_CPB_RESP_* flags, examined in nv_adma_check_cpb() */
199 u8 reserved1; /* 1 */
200 u8 ctl_flags; /* 2: NV_CPB_CTL_* control bits */
201 /* len is length of taskfile in 64 bit words */
202 u8 len; /* 3 */
203 u8 tag; /* 4: presumably the command tag - confirm vs qc_prep (not in view) */
204 u8 next_cpb_idx; /* 5 */
205 __le16 reserved2; /* 6-7 */
206 __le16 tf[12]; /* 8-31: register writes built by nv_adma_tf_to_cpb() */
207 struct nv_adma_prd aprd[5]; /* 32-111: first 5 SG segments */
208 __le64 next_aprd; /* 112-119: bus address of the remaining APRDs */
209 __le64 reserved3; /* 120-127 */
10ad05df 210};
1da177e4 211
fbbb262d
RH
212
/* Per-port private data for ADMA operation (set up in nv_adma_port_start). */
213struct nv_adma_port_priv {
214 struct nv_adma_cpb *cpb;	/* CPB array, CPU address */
215 dma_addr_t cpb_dma;		/* CPB array, bus address */
216 struct nv_adma_prd *aprd;	/* presumably the overflow APRD tables - alloc not in view */
217 dma_addr_t aprd_dma;
cdf56bcf
RH
218 void __iomem * ctl_block;	/* this port's ADMA registers (BAR5 + NV_ADMA_PORT) */
219 void __iomem * gen_block;	/* ADMA general registers (BAR5 + NV_ADMA_GEN) */
220 void __iomem * notifier_clear_block;	/* this port's notifier-clear register */
fbbb262d
RH
221 u8 flags;	/* NV_ADMA_PORT_REGISTER_MODE / NV_ADMA_ATAPI_SETUP_COMPLETE */
222};
223
cdf56bcf
RH
/* Host-wide private data. */
224struct nv_host_priv {
225 unsigned long type;	/* chip type - presumably an enum nv_host_type value; confirm at probe */
226};
227
fbbb262d
RH
228#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
229
1da177e4 230static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
cdf56bcf
RH
231static void nv_remove_one (struct pci_dev *pdev);
232static int nv_pci_device_resume(struct pci_dev *pdev);
cca3974e 233static void nv_ck804_host_stop(struct ata_host *host);
7d12e780
DH
234static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
235static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
236static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
1da177e4
LT
237static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
238static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
1da177e4 239
39f87582
TH
240static void nv_nf2_freeze(struct ata_port *ap);
241static void nv_nf2_thaw(struct ata_port *ap);
242static void nv_ck804_freeze(struct ata_port *ap);
243static void nv_ck804_thaw(struct ata_port *ap);
244static void nv_error_handler(struct ata_port *ap);
fbbb262d 245static int nv_adma_slave_config(struct scsi_device *sdev);
2dec7555 246static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
fbbb262d
RH
247static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
248static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
249static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
250static void nv_adma_irq_clear(struct ata_port *ap);
251static int nv_adma_port_start(struct ata_port *ap);
252static void nv_adma_port_stop(struct ata_port *ap);
cdf56bcf
RH
253static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
254static int nv_adma_port_resume(struct ata_port *ap);
fbbb262d
RH
255static void nv_adma_error_handler(struct ata_port *ap);
256static void nv_adma_host_stop(struct ata_host *host);
257static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
258static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
259static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
260static u8 nv_adma_bmdma_status(struct ata_port *ap);
39f87582 261
1da177e4
LT
262enum nv_host_type
263{
264 GENERIC,
265 NFORCE2,
27e4b274 266 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
fbbb262d
RH
267 CK804,
268 ADMA
1da177e4
LT
269};
270
3b7d697d 271static const struct pci_device_id nv_pci_tbl[] = {
54bb3a94
JG
272 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
273 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
274 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
275 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
276 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
277 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
278 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
279 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
280 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
281 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
282 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
283 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
284 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
285 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
1da177e4
LT
286 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
287 PCI_ANY_ID, PCI_ANY_ID,
288 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
541134cf
DD
289 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
290 PCI_ANY_ID, PCI_ANY_ID,
291 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
2d2744fc
JG
292
293 { } /* terminate list */
1da177e4
LT
294};
295
1da177e4
LT
296static struct pci_driver nv_pci_driver = {
297 .name = DRV_NAME,
298 .id_table = nv_pci_tbl,
299 .probe = nv_init_one,
cdf56bcf
RH
300 .suspend = ata_pci_device_suspend,
301 .resume = nv_pci_device_resume,
302 .remove = nv_remove_one,
1da177e4
LT
303};
304
193515d5 305static struct scsi_host_template nv_sht = {
1da177e4
LT
306 .module = THIS_MODULE,
307 .name = DRV_NAME,
308 .ioctl = ata_scsi_ioctl,
309 .queuecommand = ata_scsi_queuecmd,
1da177e4
LT
310 .can_queue = ATA_DEF_QUEUE,
311 .this_id = ATA_SHT_THIS_ID,
312 .sg_tablesize = LIBATA_MAX_PRD,
1da177e4
LT
313 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
314 .emulated = ATA_SHT_EMULATED,
315 .use_clustering = ATA_SHT_USE_CLUSTERING,
316 .proc_name = DRV_NAME,
317 .dma_boundary = ATA_DMA_BOUNDARY,
318 .slave_configure = ata_scsi_slave_config,
ccf68c34 319 .slave_destroy = ata_scsi_slave_destroy,
1da177e4 320 .bios_param = ata_std_bios_param,
cdf56bcf
RH
321 .suspend = ata_scsi_device_suspend,
322 .resume = ata_scsi_device_resume,
1da177e4
LT
323};
324
fbbb262d
RH
325static struct scsi_host_template nv_adma_sht = {
326 .module = THIS_MODULE,
327 .name = DRV_NAME,
328 .ioctl = ata_scsi_ioctl,
329 .queuecommand = ata_scsi_queuecmd,
330 .can_queue = NV_ADMA_MAX_CPBS,
331 .this_id = ATA_SHT_THIS_ID,
332 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
fbbb262d
RH
333 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
334 .emulated = ATA_SHT_EMULATED,
335 .use_clustering = ATA_SHT_USE_CLUSTERING,
336 .proc_name = DRV_NAME,
337 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
338 .slave_configure = nv_adma_slave_config,
339 .slave_destroy = ata_scsi_slave_destroy,
340 .bios_param = ata_std_bios_param,
cdf56bcf
RH
341 .suspend = ata_scsi_device_suspend,
342 .resume = ata_scsi_device_resume,
fbbb262d
RH
343};
344
ada364e8 345static const struct ata_port_operations nv_generic_ops = {
1da177e4
LT
346 .port_disable = ata_port_disable,
347 .tf_load = ata_tf_load,
348 .tf_read = ata_tf_read,
349 .exec_command = ata_exec_command,
350 .check_status = ata_check_status,
351 .dev_select = ata_std_dev_select,
1da177e4
LT
352 .bmdma_setup = ata_bmdma_setup,
353 .bmdma_start = ata_bmdma_start,
354 .bmdma_stop = ata_bmdma_stop,
355 .bmdma_status = ata_bmdma_status,
356 .qc_prep = ata_qc_prep,
357 .qc_issue = ata_qc_issue_prot,
39f87582
TH
358 .freeze = ata_bmdma_freeze,
359 .thaw = ata_bmdma_thaw,
360 .error_handler = nv_error_handler,
361 .post_internal_cmd = ata_bmdma_post_internal_cmd,
0d5ff566 362 .data_xfer = ata_data_xfer,
ada364e8 363 .irq_handler = nv_generic_interrupt,
1da177e4 364 .irq_clear = ata_bmdma_irq_clear,
246ce3b6
AI
365 .irq_on = ata_irq_on,
366 .irq_ack = ata_irq_ack,
1da177e4
LT
367 .scr_read = nv_scr_read,
368 .scr_write = nv_scr_write,
369 .port_start = ata_port_start,
1da177e4
LT
370};
371
ada364e8
TH
372static const struct ata_port_operations nv_nf2_ops = {
373 .port_disable = ata_port_disable,
374 .tf_load = ata_tf_load,
375 .tf_read = ata_tf_read,
376 .exec_command = ata_exec_command,
377 .check_status = ata_check_status,
378 .dev_select = ata_std_dev_select,
ada364e8
TH
379 .bmdma_setup = ata_bmdma_setup,
380 .bmdma_start = ata_bmdma_start,
381 .bmdma_stop = ata_bmdma_stop,
382 .bmdma_status = ata_bmdma_status,
383 .qc_prep = ata_qc_prep,
384 .qc_issue = ata_qc_issue_prot,
39f87582
TH
385 .freeze = nv_nf2_freeze,
386 .thaw = nv_nf2_thaw,
387 .error_handler = nv_error_handler,
388 .post_internal_cmd = ata_bmdma_post_internal_cmd,
0d5ff566 389 .data_xfer = ata_data_xfer,
ada364e8
TH
390 .irq_handler = nv_nf2_interrupt,
391 .irq_clear = ata_bmdma_irq_clear,
246ce3b6
AI
392 .irq_on = ata_irq_on,
393 .irq_ack = ata_irq_ack,
ada364e8
TH
394 .scr_read = nv_scr_read,
395 .scr_write = nv_scr_write,
396 .port_start = ata_port_start,
ada364e8
TH
397};
398
399static const struct ata_port_operations nv_ck804_ops = {
400 .port_disable = ata_port_disable,
401 .tf_load = ata_tf_load,
402 .tf_read = ata_tf_read,
403 .exec_command = ata_exec_command,
404 .check_status = ata_check_status,
405 .dev_select = ata_std_dev_select,
ada364e8
TH
406 .bmdma_setup = ata_bmdma_setup,
407 .bmdma_start = ata_bmdma_start,
408 .bmdma_stop = ata_bmdma_stop,
409 .bmdma_status = ata_bmdma_status,
410 .qc_prep = ata_qc_prep,
411 .qc_issue = ata_qc_issue_prot,
39f87582
TH
412 .freeze = nv_ck804_freeze,
413 .thaw = nv_ck804_thaw,
414 .error_handler = nv_error_handler,
415 .post_internal_cmd = ata_bmdma_post_internal_cmd,
0d5ff566 416 .data_xfer = ata_data_xfer,
ada364e8
TH
417 .irq_handler = nv_ck804_interrupt,
418 .irq_clear = ata_bmdma_irq_clear,
246ce3b6
AI
419 .irq_on = ata_irq_on,
420 .irq_ack = ata_irq_ack,
ada364e8
TH
421 .scr_read = nv_scr_read,
422 .scr_write = nv_scr_write,
423 .port_start = ata_port_start,
ada364e8
TH
424 .host_stop = nv_ck804_host_stop,
425};
426
fbbb262d
RH
427static const struct ata_port_operations nv_adma_ops = {
428 .port_disable = ata_port_disable,
429 .tf_load = ata_tf_load,
430 .tf_read = ata_tf_read,
2dec7555 431 .check_atapi_dma = nv_adma_check_atapi_dma,
fbbb262d
RH
432 .exec_command = ata_exec_command,
433 .check_status = ata_check_status,
434 .dev_select = ata_std_dev_select,
435 .bmdma_setup = nv_adma_bmdma_setup,
436 .bmdma_start = nv_adma_bmdma_start,
437 .bmdma_stop = nv_adma_bmdma_stop,
438 .bmdma_status = nv_adma_bmdma_status,
439 .qc_prep = nv_adma_qc_prep,
440 .qc_issue = nv_adma_qc_issue,
441 .freeze = nv_ck804_freeze,
442 .thaw = nv_ck804_thaw,
443 .error_handler = nv_adma_error_handler,
444 .post_internal_cmd = nv_adma_bmdma_stop,
0d5ff566 445 .data_xfer = ata_data_xfer,
fbbb262d
RH
446 .irq_handler = nv_adma_interrupt,
447 .irq_clear = nv_adma_irq_clear,
246ce3b6
AI
448 .irq_on = ata_irq_on,
449 .irq_ack = ata_irq_ack,
fbbb262d
RH
450 .scr_read = nv_scr_read,
451 .scr_write = nv_scr_write,
452 .port_start = nv_adma_port_start,
453 .port_stop = nv_adma_port_stop,
cdf56bcf
RH
454 .port_suspend = nv_adma_port_suspend,
455 .port_resume = nv_adma_port_resume,
fbbb262d
RH
456 .host_stop = nv_adma_host_stop,
457};
458
ada364e8
TH
459static struct ata_port_info nv_port_info[] = {
460 /* generic */
461 {
462 .sht = &nv_sht,
722420fe
TH
463 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
464 ATA_FLAG_HRST_TO_RESUME,
ada364e8
TH
465 .pio_mask = NV_PIO_MASK,
466 .mwdma_mask = NV_MWDMA_MASK,
467 .udma_mask = NV_UDMA_MASK,
468 .port_ops = &nv_generic_ops,
469 },
470 /* nforce2/3 */
471 {
472 .sht = &nv_sht,
722420fe
TH
473 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
474 ATA_FLAG_HRST_TO_RESUME,
ada364e8
TH
475 .pio_mask = NV_PIO_MASK,
476 .mwdma_mask = NV_MWDMA_MASK,
477 .udma_mask = NV_UDMA_MASK,
478 .port_ops = &nv_nf2_ops,
479 },
480 /* ck804 */
481 {
482 .sht = &nv_sht,
722420fe
TH
483 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
484 ATA_FLAG_HRST_TO_RESUME,
ada364e8
TH
485 .pio_mask = NV_PIO_MASK,
486 .mwdma_mask = NV_MWDMA_MASK,
487 .udma_mask = NV_UDMA_MASK,
488 .port_ops = &nv_ck804_ops,
489 },
fbbb262d
RH
490 /* ADMA */
491 {
492 .sht = &nv_adma_sht,
493 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
cdf56bcf 494 ATA_FLAG_HRST_TO_RESUME |
fbbb262d
RH
495 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
496 .pio_mask = NV_PIO_MASK,
497 .mwdma_mask = NV_MWDMA_MASK,
498 .udma_mask = NV_UDMA_MASK,
499 .port_ops = &nv_adma_ops,
500 },
1da177e4
LT
501};
502
503MODULE_AUTHOR("NVIDIA");
504MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
505MODULE_LICENSE("GPL");
506MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
507MODULE_VERSION(DRV_VERSION);
508
fbbb262d
RH
509static int adma_enabled = 1;
510
2dec7555
RH
511static void nv_adma_register_mode(struct ata_port *ap)
512{
2dec7555 513 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 514 void __iomem *mmio = pp->ctl_block;
2dec7555
RH
515 u16 tmp;
516
517 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
518 return;
519
520 tmp = readw(mmio + NV_ADMA_CTL);
521 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
522
523 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
524}
525
526static void nv_adma_mode(struct ata_port *ap)
527{
2dec7555 528 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 529 void __iomem *mmio = pp->ctl_block;
2dec7555
RH
530 u16 tmp;
531
532 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
533 return;
f20b16ff 534
2dec7555
RH
535 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
536
537 tmp = readw(mmio + NV_ADMA_CTL);
538 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
539
540 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
541}
542
fbbb262d
RH
543static int nv_adma_slave_config(struct scsi_device *sdev)
544{
545 struct ata_port *ap = ata_shost_to_port(sdev->host);
2dec7555
RH
546 struct nv_adma_port_priv *pp = ap->private_data;
547 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
fbbb262d
RH
548 u64 bounce_limit;
549 unsigned long segment_boundary;
550 unsigned short sg_tablesize;
551 int rc;
2dec7555
RH
552 int adma_enable;
553 u32 current_reg, new_reg, config_mask;
fbbb262d
RH
554
555 rc = ata_scsi_slave_config(sdev);
556
557 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
558 /* Not a proper libata device, ignore */
559 return rc;
560
561 if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
562 /*
563 * NVIDIA reports that ADMA mode does not support ATAPI commands.
564 * Therefore ATAPI commands are sent through the legacy interface.
565 * However, the legacy interface only supports 32-bit DMA.
566 * Restrict DMA parameters as required by the legacy interface
567 * when an ATAPI device is connected.
568 */
569 bounce_limit = ATA_DMA_MASK;
570 segment_boundary = ATA_DMA_BOUNDARY;
571 /* Subtract 1 since an extra entry may be needed for padding, see
572 libata-scsi.c */
573 sg_tablesize = LIBATA_MAX_PRD - 1;
f20b16ff 574
2dec7555
RH
575 /* Since the legacy DMA engine is in use, we need to disable ADMA
576 on the port. */
577 adma_enable = 0;
578 nv_adma_register_mode(ap);
fbbb262d
RH
579 }
580 else {
581 bounce_limit = *ap->dev->dma_mask;
582 segment_boundary = NV_ADMA_DMA_BOUNDARY;
583 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
2dec7555 584 adma_enable = 1;
fbbb262d 585 }
f20b16ff 586
2dec7555
RH
587 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
588
589 if(ap->port_no == 1)
590 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
591 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
592 else
593 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
594 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
f20b16ff 595
2dec7555
RH
596 if(adma_enable) {
597 new_reg = current_reg | config_mask;
598 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
599 }
600 else {
601 new_reg = current_reg & ~config_mask;
602 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
603 }
f20b16ff 604
2dec7555
RH
605 if(current_reg != new_reg)
606 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
f20b16ff 607
fbbb262d
RH
608 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
609 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
610 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
611 ata_port_printk(ap, KERN_INFO,
612 "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
613 (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
614 return rc;
615}
616
2dec7555
RH
617static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
618{
619 struct nv_adma_port_priv *pp = qc->ap->private_data;
620 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
621}
622
623static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
fbbb262d
RH
624{
625 unsigned int idx = 0;
626
627 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
628
629 if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
630 cpb[idx++] = cpu_to_le16(IGN);
631 cpb[idx++] = cpu_to_le16(IGN);
632 cpb[idx++] = cpu_to_le16(IGN);
633 cpb[idx++] = cpu_to_le16(IGN);
634 cpb[idx++] = cpu_to_le16(IGN);
635 }
636 else {
637 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
638 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
639 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
640 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
641 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
642 }
643 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
644 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
645 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
646 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
647 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
648
649 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
650
651 return idx;
652}
653
5bd28a4b 654static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
fbbb262d
RH
655{
656 struct nv_adma_port_priv *pp = ap->private_data;
2dec7555 657 u8 flags = pp->cpb[cpb_num].resp_flags;
fbbb262d
RH
658
659 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
660
5bd28a4b
RH
661 if (unlikely((force_err ||
662 flags & (NV_CPB_RESP_ATA_ERR |
663 NV_CPB_RESP_CMD_ERR |
664 NV_CPB_RESP_CPB_ERR)))) {
665 struct ata_eh_info *ehi = &ap->eh_info;
666 int freeze = 0;
667
668 ata_ehi_clear_desc(ehi);
669 ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
670 if (flags & NV_CPB_RESP_ATA_ERR) {
671 ata_ehi_push_desc(ehi, ": ATA error");
672 ehi->err_mask |= AC_ERR_DEV;
673 } else if (flags & NV_CPB_RESP_CMD_ERR) {
674 ata_ehi_push_desc(ehi, ": CMD error");
675 ehi->err_mask |= AC_ERR_DEV;
676 } else if (flags & NV_CPB_RESP_CPB_ERR) {
677 ata_ehi_push_desc(ehi, ": CPB error");
678 ehi->err_mask |= AC_ERR_SYSTEM;
679 freeze = 1;
680 } else {
681 /* notifier error, but no error in CPB flags? */
682 ehi->err_mask |= AC_ERR_OTHER;
683 freeze = 1;
684 }
685 /* Kill all commands. EH will determine what actually failed. */
686 if (freeze)
687 ata_port_freeze(ap);
688 else
689 ata_port_abort(ap);
690 return 1;
fbbb262d 691 }
5bd28a4b
RH
692
693 if (flags & NV_CPB_RESP_DONE) {
fbbb262d 694 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
5bd28a4b
RH
695 VPRINTK("CPB flags done, flags=0x%x\n", flags);
696 if (likely(qc)) {
697 /* Grab the ATA port status for non-NCQ commands.
fbbb262d
RH
698 For NCQ commands the current status may have nothing to do with
699 the command just completed. */
5bd28a4b
RH
700 if (qc->tf.protocol != ATA_PROT_NCQ) {
701 u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
702 qc->err_mask |= ac_err_mask(ata_status);
703 }
fbbb262d
RH
704 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
705 qc->err_mask);
706 ata_qc_complete(qc);
707 }
708 }
5bd28a4b 709 return 0;
fbbb262d
RH
710}
711
2dec7555
RH
712static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
713{
714 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
2dec7555
RH
715
716 /* freeze if hotplugged */
717 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
718 ata_port_freeze(ap);
719 return 1;
720 }
721
722 /* bail out if not our interrupt */
723 if (!(irq_stat & NV_INT_DEV))
724 return 0;
725
726 /* DEV interrupt w/ no active qc? */
727 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
728 ata_check_status(ap);
729 return 1;
730 }
731
732 /* handle interrupt */
f740d168 733 return ata_host_intr(ap, qc);
2dec7555
RH
734}
735
fbbb262d
RH
736static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
737{
738 struct ata_host *host = dev_instance;
739 int i, handled = 0;
2dec7555 740 u32 notifier_clears[2];
fbbb262d
RH
741
742 spin_lock(&host->lock);
743
744 for (i = 0; i < host->n_ports; i++) {
745 struct ata_port *ap = host->ports[i];
2dec7555 746 notifier_clears[i] = 0;
fbbb262d
RH
747
748 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
749 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 750 void __iomem *mmio = pp->ctl_block;
fbbb262d
RH
751 u16 status;
752 u32 gen_ctl;
fbbb262d
RH
753 u32 notifier, notifier_error;
754
755 /* if in ATA register mode, use standard ata interrupt handler */
756 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
0d5ff566 757 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
2dec7555 758 >> (NV_INT_PORT_SHIFT * i);
f740d168
RH
759 if(ata_tag_valid(ap->active_tag))
760 /** NV_INT_DEV indication seems unreliable at times
761 at least in ADMA mode. Force it on always when a
762 command is active, to prevent losing interrupts. */
763 irq_stat |= NV_INT_DEV;
2dec7555 764 handled += nv_host_intr(ap, irq_stat);
fbbb262d
RH
765 continue;
766 }
767
768 notifier = readl(mmio + NV_ADMA_NOTIFIER);
769 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
2dec7555 770 notifier_clears[i] = notifier | notifier_error;
fbbb262d 771
cdf56bcf 772 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
fbbb262d 773
fbbb262d
RH
774 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
775 !notifier_error)
776 /* Nothing to do */
777 continue;
778
779 status = readw(mmio + NV_ADMA_STAT);
780
781 /* Clear status. Ensure the controller sees the clearing before we start
782 looking at any of the CPB statuses, so that any CPB completions after
783 this point in the handler will raise another interrupt. */
784 writew(status, mmio + NV_ADMA_STAT);
785 readw(mmio + NV_ADMA_STAT); /* flush posted write */
786 rmb();
787
5bd28a4b
RH
788 handled++; /* irq handled if we got here */
789
790 /* freeze if hotplugged or controller error */
791 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
792 NV_ADMA_STAT_HOTUNPLUG |
793 NV_ADMA_STAT_TIMEOUT))) {
794 struct ata_eh_info *ehi = &ap->eh_info;
795
796 ata_ehi_clear_desc(ehi);
797 ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
798 if (status & NV_ADMA_STAT_TIMEOUT) {
799 ehi->err_mask |= AC_ERR_SYSTEM;
800 ata_ehi_push_desc(ehi, ": timeout");
801 } else if (status & NV_ADMA_STAT_HOTPLUG) {
802 ata_ehi_hotplugged(ehi);
803 ata_ehi_push_desc(ehi, ": hotplug");
804 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
805 ata_ehi_hotplugged(ehi);
806 ata_ehi_push_desc(ehi, ": hot unplug");
807 }
fbbb262d 808 ata_port_freeze(ap);
fbbb262d
RH
809 continue;
810 }
811
5bd28a4b
RH
812 if (status & (NV_ADMA_STAT_DONE |
813 NV_ADMA_STAT_CPBERR)) {
fbbb262d
RH
814 /** Check CPBs for completed commands */
815
5bd28a4b 816 if (ata_tag_valid(ap->active_tag)) {
fbbb262d 817 /* Non-NCQ command */
5bd28a4b
RH
818 nv_adma_check_cpb(ap, ap->active_tag,
819 notifier_error & (1 << ap->active_tag));
820 } else {
821 int pos, error = 0;
fbbb262d 822 u32 active = ap->sactive;
5bd28a4b
RH
823
824 while ((pos = ffs(active)) && !error) {
fbbb262d 825 pos--;
5bd28a4b
RH
826 error = nv_adma_check_cpb(ap, pos,
827 notifier_error & (1 << pos) );
fbbb262d
RH
828 active &= ~(1 << pos );
829 }
830 }
831 }
fbbb262d
RH
832 }
833 }
f20b16ff 834
2dec7555
RH
835 if(notifier_clears[0] || notifier_clears[1]) {
836 /* Note: Both notifier clear registers must be written
837 if either is set, even if one is zero, according to NVIDIA. */
cdf56bcf
RH
838 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
839 writel(notifier_clears[0], pp->notifier_clear_block);
840 pp = host->ports[1]->private_data;
841 writel(notifier_clears[1], pp->notifier_clear_block);
2dec7555 842 }
fbbb262d
RH
843
844 spin_unlock(&host->lock);
845
846 return IRQ_RETVAL(handled);
847}
848
849static void nv_adma_irq_clear(struct ata_port *ap)
850{
cdf56bcf
RH
851 struct nv_adma_port_priv *pp = ap->private_data;
852 void __iomem *mmio = pp->ctl_block;
fbbb262d
RH
853 u16 status = readw(mmio + NV_ADMA_STAT);
854 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
855 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
0d5ff566 856 void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
fbbb262d
RH
857
858 /* clear ADMA status */
859 writew(status, mmio + NV_ADMA_STAT);
860 writel(notifier | notifier_error,
cdf56bcf 861 pp->notifier_clear_block);
fbbb262d
RH
862
863 /** clear legacy status */
0d5ff566 864 iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
fbbb262d
RH
865}
866
867static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
868{
2dec7555
RH
869 struct ata_port *ap = qc->ap;
870 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
871 struct nv_adma_port_priv *pp = ap->private_data;
872 u8 dmactl;
fbbb262d 873
2dec7555 874 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
fbbb262d
RH
875 WARN_ON(1);
876 return;
877 }
878
2dec7555 879 /* load PRD table addr. */
0d5ff566 880 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2dec7555
RH
881
882 /* specify data direction, triple-check start bit is clear */
0d5ff566 883 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2dec7555
RH
884 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
885 if (!rw)
886 dmactl |= ATA_DMA_WR;
887
0d5ff566 888 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2dec7555
RH
889
890 /* issue r/w command */
891 ata_exec_command(ap, &qc->tf);
fbbb262d
RH
892}
893
/* Start the legacy BMDMA transfer previously set up by
 * nv_adma_bmdma_setup().  Only valid in register mode. */
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);	/* caller must switch to register mode first */
		return;
	}

	/* start host DMA transaction: read-modify-write the start bit */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
910
/* Stop an in-flight legacy BMDMA transfer.  Silently does nothing when
 * the port is in ADMA mode (nothing legacy to stop). */
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap); /* dummy read */
}
926
2dec7555 927static u8 nv_adma_bmdma_status(struct ata_port *ap)
fbbb262d 928{
fbbb262d 929 struct nv_adma_port_priv *pp = ap->private_data;
fbbb262d 930
2dec7555 931 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
fbbb262d 932
0d5ff566 933 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
fbbb262d
RH
934}
935
/* Per-port initialization for ADMA operation.
 *
 * Allocates one coherent DMA chunk holding the CPB array (one 128-byte
 * CPB per tag) followed by the external scatter/gather tables, maps the
 * port's ADMA control/general register blocks, and brings the channel up
 * in register mode with ADMA interrupts enabled.  All allocations are
 * managed (devm_/dmam_), so there is no explicit error-path cleanup.
 *
 * Returns 0 on success or a negative errno. */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* per-port ADMA register block, then the shared "general" block */
	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	/* tell the controller where the CPB array lives (64-bit split) */
	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET; the readl()s flush the posted writes so the
	 * udelay actually separates assert and deassert on the wire */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;
}
1013
/* Shut the ADMA channel down by clearing the whole control register
 * (drops GO, AIEN, etc.).  Memory is devres-managed, so nothing to free. */
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}
1022
cdf56bcf
RH
/* Suspend hook: drop back to register mode, zero the CPB fetch count and
 * disable the channel/interrupts.  Always returns 0. */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
1039
/* Resume hook: re-program the CPB base (lost across suspend), clear stale
 * interrupt state, and repeat the same register-mode + channel-reset
 * bring-up sequence as nv_adma_port_start().  Always returns 0. */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location (64-bit split) */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET; readl()s flush posted writes around udelay */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;
}
fbbb262d
RH
1072
/* Point the port's taskfile register addresses into the ADMA register
 * block: each shadow register sits at (ATA_REG_* * 4) within the port's
 * ADMA window, with the control/altstatus register at offset 0x20. */
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
1096
/* Host-wide ADMA enable: turn on ADMA (and posted-write buffering) for
 * both ports via PCI config space, then wire up each port's taskfile
 * addresses into the ADMA register space.  Returns 0 (cannot fail). */
static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}
1119
/* Fill one ADMA PRD (scatter/gather) entry for sg element @idx of @qc. */
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;	/* final element of the whole list */
	else if (idx != 4)
		/* idx 4 is the last of the 5 APRDs embedded in the CPB;
		 * continuation to the external table presumably happens via
		 * cpb->next_aprd (see nv_adma_fill_sg) rather than CONT —
		 * NOTE(review): confirm against ADMA docs */
		flags |= NV_APRD_CONT;

	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
}
1141
/* Build the scatter/gather list for @qc: the first 5 entries go into the
 * APRD slots embedded in the CPB itself; any remainder goes into this
 * tag's slice of the external APRD table, linked via cpb->next_aprd. */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		/* spilled past the inline APRDs: chain to the external table */
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}
1161
/* Prepare a queued command.  Non-DMA-mapped commands (and everything
 * after ATAPI setup completed) fall back to register mode and the
 * standard prep path; otherwise build the command's CPB in place. */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_APRD_VALID |
		       NV_CPB_CTL_IEN;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* legacy path: register mode + standard libata prep */
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len = 3;
	cpb->tag = qc->tag;
	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	nv_adma_fill_sg(qc, cpb);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}
1198
/* Issue a prepared command.  Legacy commands go through the standard
 * register-mode issue path; ADMA commands are kicked off by writing the
 * tag to the APPEND register (after a wmb() so the CPB contents are
 * visible to the controller first).  Returns 0 or libata error mask. */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* use ATA register mode */
		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}
1224
/* Shared IRQ handler for the generic (non-CK804, non-ADMA) flavors.
 * Walks every enabled port under the host lock; ports with an active
 * non-polled command are serviced by ata_host_intr(), idle ports get a
 * status read to acknowledge any stray interrupt. */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1257
cca3974e 1258static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
ada364e8
TH
1259{
1260 int i, handled = 0;
1261
cca3974e
JG
1262 for (i = 0; i < host->n_ports; i++) {
1263 struct ata_port *ap = host->ports[i];
ada364e8
TH
1264
1265 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1266 handled += nv_host_intr(ap, irq_stat);
1267
1268 irq_stat >>= NV_INT_PORT_SHIFT;
1269 }
1270
1271 return IRQ_RETVAL(handled);
1272}
1273
/* IRQ handler for nForce2/3: the interrupt-status byte lives in the
 * SCR/BMDMA I/O space of port 0; delegate decoding to nv_do_interrupt()
 * under the host lock. */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1287
/* IRQ handler for CK804/MCP04: same as nv_nf2_interrupt() but the
 * interrupt-status byte is read from the MMIO BAR instead of port I/O. */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1301
1da177e4
LT
1302static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
1303{
1da177e4
LT
1304 if (sc_reg > SCR_CONTROL)
1305 return 0xffffffffU;
1306
0d5ff566 1307 return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1da177e4
LT
1308}
1309
1310static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
1311{
1da177e4
LT
1312 if (sc_reg > SCR_CONTROL)
1313 return;
1314
0d5ff566 1315 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1da177e4
LT
1316}
1317
39f87582
TH
/* EH freeze for nForce2/3: mask this port's interrupt bits.  The enable
 * register is shared between ports and lives in port 0's SCR space. */
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1328
/* EH thaw for nForce2/3: acknowledge any stale status bits, then
 * re-enable this port's interrupts.  Note NV_INT_ALL is used to clear
 * status but only NV_INT_MASK is re-enabled — presumably the remaining
 * bits are intentionally left masked (verify against nv_host_intr). */
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1341
/* EH freeze for CK804: same masking as nv_nf2_freeze() but the
 * interrupt-enable register sits in the MMIO BAR. */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1352
/* EH thaw for CK804: clear stale status then re-enable this port's
 * interrupts (MMIO variant of nv_nf2_thaw()). */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1365
/* Hardreset that deliberately discards the device classification
 * (@class is never written; the result goes into a dummy). */
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}
1376
/* Standard BMDMA error handler with the non-classifying nv_hardreset. */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1382
fbbb262d
RH
/* ADMA error handler.  If the port is still in ADMA mode, quiesce the
 * hardware first — switch to register mode, invalidate all CPBs so none
 * can execute, zero the fetch count, and pulse a channel reset — before
 * running the standard BMDMA EH. */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel; readl()s flush posted writes around udelay */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1413
1da177e4
LT
/* PCI probe.
 *
 * Rejects non-SATA (IDE) functions, enables the device with managed
 * (pcim_/devm_) resources so error paths need no explicit cleanup,
 * selects ADMA on CK804+ when the module parameter allows and a 64-bit
 * DMA mask can be set, maps BAR 5 for SCR/ADMA registers, and registers
 * the two ports with libata.
 *
 * Returns 0 on success or a negative errno. */
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pcim_pin_device(pdev);
		return rc;
	}

	/* ADMA needs a 64-bit DMA mask; fall back to legacy if it fails */
	if(type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if(!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
	}

	rc = -ENOMEM;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
		return -EIO;
	probe_ent->iomap = pcim_iomap_table(pdev);

	probe_ent->private_data = hpriv;
	hpriv->type = type;

	base = probe_ent->iomap[NV_MMIO_BAR];
	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			return rc;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		return -ENODEV;

	/* probe_ent was only needed for registration */
	devm_kfree(&pdev->dev, probe_ent);
	return 0;
}
1509
cdf56bcf
RH
/* PCI remove: unregister via libata, then free the host-private data
 * (hpriv is kzalloc'd via devm against &pdev->dev, so this kfree pairs
 * with whichever allocation scheme created it — see nv_init_one). */
static void nv_remove_one (struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}
1518
/* PCI resume.  After the generic PCI-side resume, PCI config space must
 * be re-programmed if we actually lost power (PM_EVENT_SUSPEND):
 * re-enable SATA space on CK804+, and for ADMA re-apply each port's
 * enable/PWB bits according to whether it fell back to legacy ATAPI
 * mode.  Always returns 0. */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_device_do_resume(pdev);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if(hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if(hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
1563
/* Host teardown for CK804: turn the SATA register space back off in
 * PCI config space (mirrors the enable in nv_init_one). */
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
1574
fbbb262d
RH
/* Host teardown for ADMA: clear the per-port ADMA enable/PWB bits
 * (mirroring nv_adma_host_init), then run the CK804 teardown. */
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
1591
1da177e4
LT
/* Module entry point: register the PCI driver. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
1596
/* Module exit point: unregister the PCI driver. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
1601
1602module_init(nv_init);
1603module_exit(nv_exit);
fbbb262d
RH
1604module_param_named(adma, adma_enabled, bool, 0444);
1605MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");