/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in much the same
 *  fashion as other PCI IDE BMDMA controllers, with a few NV-specific
 *  details such as register offsets, SATA phy location, hotplug info,
 *  etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.3"

#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL

enum {
        NV_MMIO_BAR = 5,

        NV_PORTS = 2,
        NV_PIO_MASK = 0x1f,
        NV_MWDMA_MASK = 0x07,
        NV_UDMA_MASK = 0x7f,
        NV_PORT0_SCR_REG_OFFSET = 0x00,
        NV_PORT1_SCR_REG_OFFSET = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS = 0x10,
        NV_INT_ENABLE = 0x11,
        NV_INT_STATUS_CK804 = 0x440,
        NV_INT_ENABLE_CK804 = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV = 0x01,
        NV_INT_PM = 0x02,
        NV_INT_ADDED = 0x04,
        NV_INT_REMOVED = 0x08,

        NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */

        NV_INT_ALL = 0x0f,
        NV_INT_MASK = NV_INT_DEV |
                      NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG = 0x12,
        NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20 = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS = 32,
        NV_ADMA_CPB_SZ = 128,
        NV_ADMA_APRD_SZ = 16,
        NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
                            NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
                                   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
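        /* A note on the arithmetic above (derived from these constants, not
           from NVIDIA documentation): each of the 32 command tags owns a 1 KB
           slot -- a 128-byte CPB followed by (1024 - 128) / 16 = 56 external
           APRDs of 16 bytes each.  Together with the 5 APRDs embedded in the
           CPB that yields the 61-entry sg_tablesize, and the per-port DMA
           buffer works out to 32 * 1024 = 32 KB. */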

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN = 0x400,
        NV_ADMA_GEN_CTL = 0x00,
        NV_ADMA_NOTIFIER_CLEAR = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL = 0x40,
        NV_ADMA_CPB_COUNT = 0x42,
        NV_ADMA_NEXT_CPB_IDX = 0x43,
        NV_ADMA_STAT = 0x44,
        NV_ADMA_CPB_BASE_LOW = 0x48,
        NV_ADMA_CPB_BASE_HIGH = 0x4C,
        NV_ADMA_APPEND = 0x50,
        NV_ADMA_NOTIFIER = 0x68,
        NV_ADMA_NOTIFIER_ERROR = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
        NV_ADMA_CTL_GO = (1 << 7),
        NV_ADMA_CTL_AIEN = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE = (1 << 0),
        NV_CPB_RESP_ATA_ERR = (1 << 3),
        NV_CPB_RESP_CMD_ERR = (1 << 4),
        NV_CPB_RESP_CPB_ERR = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID = (1 << 0),
        NV_CPB_CTL_QUEUE = (1 << 1),
        NV_CPB_CTL_APRD_VALID = (1 << 2),
        NV_CPB_CTL_IEN = (1 << 3),
        NV_CPB_CTL_FPDMA = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE = (1 << 1),
        NV_APRD_END = (1 << 2),
        NV_APRD_CONT = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
        NV_ADMA_STAT_HOTPLUG = (1 << 2),
        NV_ADMA_STAT_CPBERR = (1 << 4),
        NV_ADMA_STAT_SERROR = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
        NV_ADMA_STAT_IDLE = (1 << 8),
        NV_ADMA_STAT_LEGACY = (1 << 9),
        NV_ADMA_STAT_STOPPED = (1 << 10),
        NV_ADMA_STAT_DONE = (1 << 12),
        NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
                           NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64 addr;
        __le32 len;
        u8 flags;
        u8 packet_len;
        __le16 reserved;
};

enum nv_adma_regbits {
        CMDEND = (1 << 15),     /* end of command list */
        WNB = (1 << 14),        /* wait-not-BSY */
        IGN = (1 << 13),        /* ignore this entry */
        CS1n = (1 << (4 + 8)),  /* std. PATA signals follow... */
        DA2 = (1 << (2 + 8)),
        DA1 = (1 << (1 + 8)),
        DA0 = (1 << (0 + 8)),
};
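
/* Each 16-bit taskfile entry in a CPB carries the register value in its low
   byte, the PATA chip-select/address lines above (CS1n, DA2..DA0, bits 8-12)
   selecting the target register, and the CMDEND/WNB/IGN control flags in
   bits 13-15.  nv_adma_tf_to_cpb() below builds entries in this format. */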

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8 resp_flags;          /* 0 */
        u8 reserved1;           /* 1 */
        u8 ctl_flags;           /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8 len;                 /* 3 */
        u8 tag;                 /* 4 */
        u8 next_cpb_idx;        /* 5 */
        __le16 reserved2;       /* 6-7 */
        __le16 tf[12];          /* 8-31 */
        struct nv_adma_prd aprd[5];     /* 32-111 */
        __le64 next_aprd;       /* 112-119 */
        __le64 reserved3;       /* 120-127 */
};


struct nv_adma_port_priv {
        struct nv_adma_cpb *cpb;
        dma_addr_t cpb_dma;
        struct nv_adma_prd *aprd;
        dma_addr_t aprd_dma;
        void __iomem *ctl_block;
        void __iomem *gen_block;
        void __iomem *notifier_clear_block;
        u8 flags;
};

struct nv_host_priv {
        unsigned long type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
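/* i.e. in the ADMA general control/status word the per-port interrupt flags
   sit 12 bits apart: bit 19 for port 0, bit 31 for port 1. */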

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
        { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
          PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
        { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
          PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name = DRV_NAME,
        .id_table = nv_pci_tbl,
        .probe = nv_init_one,
        .suspend = ata_pci_device_suspend,
        .resume = nv_pci_device_resume,
        .remove = nv_remove_one,
};

static struct scsi_host_template nv_sht = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .can_queue = ATA_DEF_QUEUE,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = LIBATA_MAX_PRD,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = ATA_SHT_USE_CLUSTERING,
        .proc_name = DRV_NAME,
        .dma_boundary = ATA_DMA_BOUNDARY,
        .slave_configure = ata_scsi_slave_config,
        .slave_destroy = ata_scsi_slave_destroy,
        .bios_param = ata_std_bios_param,
        .suspend = ata_scsi_device_suspend,
        .resume = ata_scsi_device_resume,
};

static struct scsi_host_template nv_adma_sht = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .can_queue = NV_ADMA_MAX_CPBS,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = ATA_SHT_USE_CLUSTERING,
        .proc_name = DRV_NAME,
        .dma_boundary = NV_ADMA_DMA_BOUNDARY,
        .slave_configure = nv_adma_slave_config,
        .slave_destroy = ata_scsi_slave_destroy,
        .bios_param = ata_std_bios_param,
        .suspend = ata_scsi_device_suspend,
        .resume = ata_scsi_device_resume,
};

static const struct ata_port_operations nv_generic_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,
        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .freeze = ata_bmdma_freeze,
        .thaw = ata_bmdma_thaw,
        .error_handler = nv_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .data_xfer = ata_data_xfer,
        .irq_handler = nv_generic_interrupt,
        .irq_clear = ata_bmdma_irq_clear,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,
        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .freeze = nv_nf2_freeze,
        .thaw = nv_nf2_thaw,
        .error_handler = nv_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .data_xfer = ata_data_xfer,
        .irq_handler = nv_nf2_interrupt,
        .irq_clear = ata_bmdma_irq_clear,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,
        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .freeze = nv_ck804_freeze,
        .thaw = nv_ck804_thaw,
        .error_handler = nv_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .data_xfer = ata_data_xfer,
        .irq_handler = nv_ck804_interrupt,
        .irq_clear = ata_bmdma_irq_clear,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = ata_port_start,
        .host_stop = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .check_atapi_dma = nv_adma_check_atapi_dma,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = nv_adma_bmdma_setup,
        .bmdma_start = nv_adma_bmdma_start,
        .bmdma_stop = nv_adma_bmdma_stop,
        .bmdma_status = nv_adma_bmdma_status,
        .qc_prep = nv_adma_qc_prep,
        .qc_issue = nv_adma_qc_issue,
        .freeze = nv_ck804_freeze,
        .thaw = nv_ck804_thaw,
        .error_handler = nv_adma_error_handler,
        .post_internal_cmd = nv_adma_bmdma_stop,
        .data_xfer = ata_data_xfer,
        .irq_handler = nv_adma_interrupt,
        .irq_clear = nv_adma_irq_clear,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = nv_adma_port_start,
        .port_stop = nv_adma_port_stop,
        .port_suspend = nv_adma_port_suspend,
        .port_resume = nv_adma_port_resume,
        .host_stop = nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .sht = &nv_sht,
                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                         ATA_FLAG_HRST_TO_RESUME,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_generic_ops,
        },
        /* nforce2/3 */
        {
                .sht = &nv_sht,
                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                         ATA_FLAG_HRST_TO_RESUME,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_nf2_ops,
        },
        /* ck804 */
        {
                .sht = &nv_sht,
                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                         ATA_FLAG_HRST_TO_RESUME,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_ck804_ops,
        },
        /* ADMA */
        {
                .sht = &nv_adma_sht,
                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                         ATA_FLAG_HRST_TO_RESUME |
                         ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_adma_ops,
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

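/* ADMA-capable flavours run each port in one of two modes: legacy "register"
   mode (NV_ADMA_CTL_GO clear, standard taskfile/BMDMA programming) and ADMA
   mode (GO set, commands fed through CPBs).  The two helpers below flip
   between them; the NV_ADMA_PORT_REGISTER_MODE flag mirrors the current
   state so redundant MMIO writes are skipped. */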
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u64 bounce_limit;
        unsigned long segment_boundary;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                bounce_limit = ATA_DMA_MASK;
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                bounce_limit = *ap->dev->dma_mask;
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

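/* A CPB's tf[] array always holds 12 entries: device select (with WNB so the
   controller waits for not-BSY), five HOB entries (real values for LBA48,
   IGN placeholders otherwise, which keeps the layout fixed), five
   current-taskfile entries, and the command itself flagged CMDEND.
   12 entries x 2 bytes = 24 bytes = 3 64-bit words, which is why
   nv_adma_qc_prep() sets cpb->len to 3. */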
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

        if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
                cpb[idx++] = cpu_to_le16(IGN);
                cpb[idx++] = cpu_to_le16(IGN);
                cpb[idx++] = cpu_to_le16(IGN);
                cpb[idx++] = cpu_to_le16(IGN);
                cpb[idx++] = cpu_to_le16(IGN);
        } else {
                cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
        }
        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

        return idx;
}

static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        int complete = 0, have_err = 0;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (flags & NV_CPB_RESP_DONE) {
                VPRINTK("CPB flags done, flags=0x%x\n", flags);
                complete = 1;
        }
        if (flags & NV_CPB_RESP_ATA_ERR) {
                ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
                have_err = 1;
                complete = 1;
        }
        if (flags & NV_CPB_RESP_CMD_ERR) {
                ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
                have_err = 1;
                complete = 1;
        }
        if (flags & NV_CPB_RESP_CPB_ERR) {
                ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
                have_err = 1;
                complete = 1;
        }
        if (complete || force_err) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
                if (likely(qc)) {
                        u8 ata_status = 0;
                        /* Only use the ATA port status for non-NCQ commands.
                           For NCQ commands the current status may have nothing to do with
                           the command just completed. */
                        if (qc->tf.protocol != ATA_PROT_NCQ)
                                ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));

                        if (have_err || force_err)
                                ata_status |= ATA_ERR;

                        qc->err_mask |= ac_err_mask(ata_status);
                        DPRINTK("Completing qc from tag %d with err_mask %u\n", cpb_num,
                                qc->err_mask);
                        ata_qc_complete(qc);
                }
        }
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        u16 status;
                        u32 gen_ctl;
                        int have_global_err = 0;
                        u32 notifier, notifier_error;

                        /* if in ATA register mode, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->active_tag))
                                        /* NV_INT_DEV indication seems unreliable at times,
                                           at least in ADMA mode. Force it on always when a
                                           command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                                continue;
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
                        rmb();

                        /* freeze if hotplugged */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
                                ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
                                ata_port_freeze(ap);
                                handled++;
                                continue;
                        }

                        if (status & NV_ADMA_STAT_TIMEOUT) {
                                ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
                                have_global_err = 1;
                        }
                        if (status & NV_ADMA_STAT_CPBERR) {
                                ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
                                have_global_err = 1;
                        }
                        if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
                                /* Check CPBs for completed commands */

                                if (ata_tag_valid(ap->active_tag))
                                        /* Non-NCQ command */
                                        nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
                                                (notifier_error & (1 << ap->active_tag)));
                                else {
                                        int pos;
                                        u32 active = ap->sactive;
                                        while ((pos = ffs(active))) {
                                                pos--;
                                                nv_adma_check_cpb(ap, pos, have_global_err ||
                                                        (notifier_error & (1 << pos)));
                                                active &= ~(1 << pos);
                                        }
                                }
                        }

                        handled++; /* irq handled if we got here */
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 status = readw(mmio + NV_ADMA_STAT);
        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
        void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

        /* clear ADMA status */
        writew(status, mmio + NV_ADMA_STAT);
        writel(notifier | notifier_error,
               pp->notifier_clear_block);

        /* clear legacy status */
        iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 dmactl;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
                WARN_ON(1);
                return;
        }

        /* load PRD table addr. */
        iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

        /* specify data direction, triple-check start bit is clear */
        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
        if (!rw)
                dmactl |= ATA_DMA_WR;

        iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

        /* issue r/w command */
        ata_exec_command(ap, &qc->tf);
}

static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 dmactl;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
                WARN_ON(1);
                return;
        }

        /* start host DMA transaction */
        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        iowrite8(dmactl | ATA_DMA_START,
                 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct nv_adma_port_priv *pp = ap->private_data;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        /* clear start/stop bit */
        iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
                 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
        ata_altstatus(ap); /* dummy read */
}

static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;

        WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

        return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        u16 tmp;

        VPRINTK("ENTER\n");

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb = mem;
        pp->cpb_dma = mem_dma;

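        /* Program the CPB base.  The double 16-bit shift below writes the
           upper half of the DMA address while staying safe when dma_addr_t
           is only 32 bits wide, where a single '>> 32' would be undefined. */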
        writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

        mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readl(mmio + NV_ADMA_CTL); /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readl(mmio + NV_ADMA_CTL); /* flush posted write */

        return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}

static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readl(mmio + NV_ADMA_CTL); /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readl(mmio + NV_ADMA_CTL); /* flush posted write */

        return 0;
}

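/* In ADMA register mode the legacy taskfile registers are memory-mapped
   inside the port's ADMA window at 4-byte strides (hence the ATA_REG_* * 4
   arithmetic below), with the control/altstatus register at offset 0x20. */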
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
        void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &probe_ent->port[port];

        VPRINTK("ENTER\n");

        mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr = mmio;
        ioport->data_addr = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr =
        ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr =
        ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr =
        ioport->ctl_addr = mmio + 0x20;
}

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
        struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
        unsigned int i;
        u32 tmp32;

        VPRINTK("ENTER\n");

        /* enable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                 NV_MCP_SATA_CFG_20_PORT1_EN |
                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        for (i = 0; i < probe_ent->n_ports; i++)
                nv_adma_setup_port(probe_ent, i);

        return 0;
}

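/* Scatter/gather handling: the first five APRDs live in the CPB itself; any
   further entries go into this tag's slot of the external APRD table, linked
   in via next_aprd.  Entry index 4 deliberately gets no NV_APRD_CONT flag --
   continuation from the last embedded APRD appears to be implied by the
   next_aprd pointer instead. */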
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
                              struct scatterlist *sg,
                              int idx,
                              struct nv_adma_prd *aprd)
{
        u8 flags;

        memset(aprd, 0, sizeof(struct nv_adma_prd));

        flags = 0;
        if (qc->tf.flags & ATA_TFLAG_WRITE)
                flags |= NV_APRD_WRITE;
        if (idx == qc->n_elem - 1)
                flags |= NV_APRD_END;
        else if (idx != 4)
                flags |= NV_APRD_CONT;

        aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
        aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
        aprd->flags = flags;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        unsigned int idx;
        struct nv_adma_prd *aprd;
        struct scatterlist *sg;

        VPRINTK("ENTER\n");

        idx = 0;

        ata_for_each_sg(sg, qc) {
                aprd = (idx < 5) ? &cpb->aprd[idx] :
                       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx - 5)];
                nv_adma_fill_aprd(qc, sg, idx, aprd);
                idx++;
        }
        if (idx > 5)
                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
                       NV_CPB_CTL_APRD_VALID |
                       NV_CPB_CTL_IEN;

        if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
            (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                nv_adma_register_mode(qc->ap);
                ata_qc_prep(qc);
                return;
        }

        memset(cpb, 0, sizeof(struct nv_adma_cpb));

        cpb->len = 3;
        cpb->tag = qc->tag;
        cpb->next_cpb_idx = 0;

        /* turn on NCQ flags for NCQ commands */
        if (qc->tf.protocol == ATA_PROT_NCQ)
                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

        VPRINTK("qc->flags = 0x%lx\n", qc->flags);

        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

        nv_adma_fill_sg(qc, cpb);

        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
           finished filling in all of the contents */
        wmb();
        cpb->ctl_flags = ctl_flags;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");

        if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
            (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                /* use ATA register mode */
                VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
                nv_adma_register_mode(qc->ap);
                return ata_qc_issue_prot(qc);
        } else
                nv_adma_mode(qc->ap);

        /* write append register, command tag in lower 8 bits
           and (number of cpbs to append -1) in top 8 bits */
        wmb();
        writew(qc->tag, mmio + NV_ADMA_APPEND);

        DPRINTK("Issued tag %u\n", qc->tag);

        return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i;
        unsigned int handled = 0;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap;

                ap = host->ports[i];
                if (ap &&
                    !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct ata_queued_cmd *qc;

                        qc = ata_qc_from_tag(ap, ap->active_tag);
                        if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                                handled += ata_host_intr(ap, qc);
                        else
                                // No request pending?  Clear interrupt status
                                // anyway, in case there's one pending.
                                ap->ops->check_status(ap);
                }

        }

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
        int i, handled = 0;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                if (ap && !(ap->flags & ATA_FLAG_DISABLED))
                        handled += nv_host_intr(ap, irq_stat);

                irq_stat >>= NV_INT_PORT_SHIFT;
        }

        return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        u8 irq_stat;
        irqreturn_t ret;

        spin_lock(&host->lock);
        irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

        return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        u8 irq_stat;
        irqreturn_t ret;

        spin_lock(&host->lock);
        irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

        return ret;
}

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
        if (sc_reg > SCR_CONTROL)
                return 0xffffffffU;

        return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
        if (sc_reg > SCR_CONTROL)
                return;

        iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask &= ~(NV_INT_ALL << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask |= (NV_INT_MASK << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask &= ~(NV_INT_ALL << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask |= (NV_INT_MASK << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
        unsigned int dummy;

        /* SATA hardreset fails to retrieve proper device signature on
         * some controllers.  Don't classify on hardreset.  For more
         * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
         */
        return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
                void __iomem *mmio = pp->ctl_block;
                int i;
                u16 tmp;

                u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
                u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
                u32 status = readw(mmio + NV_ADMA_STAT);

                ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
                        "notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
                        notifier, notifier_error, gen_ctl, status);

                for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
                        struct nv_adma_cpb *cpb = &pp->cpb[i];
                        if (cpb->ctl_flags || cpb->resp_flags)
                                ata_port_printk(ap, KERN_ERR,
                                        "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
                                        i, cpb->ctl_flags, cpb->resp_flags);
                }

                /* Push us back into port register mode for error handling. */
                nv_adma_register_mode(ap);

                ata_port_printk(ap, KERN_ERR, "Resetting port\n");

                /* Mark all of the CPBs as invalid to prevent them from being executed */
                for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

                /* clear CPB fetch count */
                writew(0, mmio + NV_ADMA_CPB_COUNT);

                /* Reset channel */
                tmp = readw(mmio + NV_ADMA_CTL);
                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readl(mmio + NV_ADMA_CTL); /* flush posted write */
                udelay(1);
                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readl(mmio + NV_ADMA_CTL); /* flush posted write */
        }

        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version = 0;
        struct ata_port_info *ppi[2];
        struct ata_probe_ent *probe_ent;
        struct nv_host_priv *hpriv;
        int rc;
        u32 bar;
        void __iomem *base;
        unsigned long type = ent->driver_data;
        int mask_set = 0;

        // Make sure this is a SATA controller by counting the number of bars
        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
        // it's an IDE controller and we ignore it.
        for (bar = 0; bar < 6; bar++)
                if (pci_resource_start(pdev, bar) == 0)
                        return -ENODEV;

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc) {
                pcim_pin_device(pdev);
                return rc;
        }

        if (type >= CK804 && adma_enabled) {
                dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
                type = ADMA;
                if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
                    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
                        mask_set = 1;
        }

        if (!mask_set) {
                rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
                if (rc)
                        return rc;
                rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
                if (rc)
                        return rc;
        }

        rc = -ENOMEM;

        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv)
                return -ENOMEM;

        ppi[0] = ppi[1] = &nv_port_info[type];
        probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
        if (!probe_ent)
                return -ENOMEM;

        if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
                return -EIO;
        probe_ent->iomap = pcim_iomap_table(pdev);

        probe_ent->private_data = hpriv;
        hpriv->type = type;

        base = probe_ent->iomap[NV_MMIO_BAR];
        probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
        probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

        /* enable SATA space for CK804 */
        if (type >= CK804) {
                u8 regval;

                pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
        }

        pci_set_master(pdev);

        if (type == ADMA) {
                rc = nv_adma_host_init(probe_ent);
                if (rc)
                        return rc;
        }

        rc = ata_device_add(probe_ent);
        if (rc != NV_PORTS)
                return -ENODEV;

        devm_kfree(&pdev->dev, probe_ent);
        return 0;
}

static void nv_remove_one (struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct nv_host_priv *hpriv = host->private_data;

        ata_pci_remove_one(pdev);
        kfree(hpriv);
}

static int nv_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct nv_host_priv *hpriv = host->private_data;

        ata_pci_device_do_resume(pdev);

        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
                if (hpriv->type >= CK804) {
                        u8 regval;

                        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                        regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
                }
                if (hpriv->type == ADMA) {
                        u32 tmp32;
                        struct nv_adma_port_priv *pp;
                        /* enable/disable ADMA on the ports appropriately */
                        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

                        pp = host->ports[0]->private_data;
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        else
                                tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
                                          NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        pp = host->ports[1]->private_data;
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
                        else
                                tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
                                          NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

                        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
                }
        }

        ata_host_resume(host);

        return 0;
}

static void nv_ck804_host_stop(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u8 regval;

        /* disable SATA space for CK804 */
        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
        regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u32 tmp32;

        /* disable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                   NV_MCP_SATA_CFG_20_PORT1_EN |
                   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
        return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
        pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");