]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/ata/sata_mv.c
sata_mv ncq Add want ncq parameter for EDMA configuration
[mirror_ubuntu-jammy-kernel.git] / drivers / ata / sata_mv.c
CommitLineData
20f733e7
BR
1/*
2 * sata_mv.c - Marvell SATA support
3 *
8b260248 4 * Copyright 2005: EMC Corporation, all rights reserved.
e2b1be56 5 * Copyright 2005 Red Hat, Inc. All rights reserved.
20f733e7
BR
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
4a05e209
JG
24/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
4a05e209
JG
32 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
4a05e209
JG
38 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is quite often not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58*/
59
60
20f733e7
BR
61#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
20f733e7 68#include <linux/dma-mapping.h>
a9524a76 69#include <linux/device.h>
20f733e7 70#include <scsi/scsi_host.h>
193515d5 71#include <scsi/scsi_cmnd.h>
6c08772e 72#include <scsi/scsi_device.h>
20f733e7 73#include <linux/libata.h>
20f733e7
BR
74
75#define DRV_NAME "sata_mv"
6c08772e 76#define DRV_VERSION "1.01"
20f733e7
BR
77
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
615ab953
ML
89 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
20f733e7 95 MV_SATAHC0_REG_BASE = 0x20000,
522479fb 96 MV_FLASH_CTL = 0x1046c,
bca1c4eb
JG
97 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
20f733e7
BR
99
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
31961943
BR
105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118
20f733e7
BR
119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
31961943 122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
20f733e7
BR
123 MV_PORT_MASK = 3,
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
c5d3e45a 128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
bdd4ddde
JG
129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
47c2b677 131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
20f733e7 132
31961943
BR
133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
c5d3e45a
JG
135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
31961943
BR
137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
c5d3e45a
JG
142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
31961943
BR
144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
20f733e7
BR
147 /* PCI interface registers */
148
31961943
BR
149 PCI_COMMAND_OFS = 0xc00,
150
20f733e7
BR
151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
522479fb
JG
156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
02a121da
ML
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
20f733e7
BR
169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
02a121da
ML
171 PCIE_IRQ_CAUSE_OFS = 0x1900,
172 PCIE_IRQ_MASK_OFS = 0x1910,
646a4da5 173 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
02a121da 174
20f733e7
BR
175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
176 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
177 PORT0_ERR = (1 << 0), /* shift by port # */
178 PORT0_DONE = (1 << 1), /* shift by port # */
179 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
180 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
181 PCI_ERR = (1 << 18),
182 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
183 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
fb621e2f
JG
184 PORTS_0_3_COAL_DONE = (1 << 8),
185 PORTS_4_7_COAL_DONE = (1 << 17),
20f733e7
BR
186 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
187 GPIO_INT = (1 << 22),
188 SELF_INT = (1 << 23),
189 TWSI_INT = (1 << 24),
190 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
fb621e2f 191 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
8b260248 192 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
20f733e7
BR
193 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
194 HC_MAIN_RSVD),
fb621e2f
JG
195 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
196 HC_MAIN_RSVD_5),
20f733e7
BR
197
198 /* SATAHC registers */
199 HC_CFG_OFS = 0,
200
201 HC_IRQ_CAUSE_OFS = 0x14,
31961943 202 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
20f733e7
BR
203 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
204 DEV_IRQ = (1 << 8), /* shift by port # */
205
206 /* Shadow block registers */
31961943
BR
207 SHD_BLK_OFS = 0x100,
208 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
20f733e7
BR
209
210 /* SATA registers */
211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
212 SATA_ACTIVE_OFS = 0x350,
0c58912e 213 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
47c2b677 214 PHY_MODE3 = 0x310,
bca1c4eb
JG
215 PHY_MODE4 = 0x314,
216 PHY_MODE2 = 0x330,
c9d39130
JG
217 MV5_PHY_MODE = 0x74,
218 MV5_LT_MODE = 0x30,
219 MV5_PHY_CTL = 0x0C,
bca1c4eb
JG
220 SATA_INTERFACE_CTL = 0x050,
221
222 MV_M2_PREAMP_MASK = 0x7e0,
20f733e7
BR
223
224 /* Port registers */
225 EDMA_CFG_OFS = 0,
0c58912e
ML
226 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
227 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
228 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
229 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
230 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
20f733e7
BR
231
232 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
233 EDMA_ERR_IRQ_MASK_OFS = 0xc,
6c1153e0
JG
234 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
235 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
236 EDMA_ERR_DEV = (1 << 2), /* device error */
237 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
238 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
239 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
c5d3e45a
JG
240 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
241 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
6c1153e0 242 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
c5d3e45a 243 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
6c1153e0
JG
244 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
245 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
246 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
247 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
646a4da5 248
6c1153e0 249 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
646a4da5
ML
250 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
251 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
252 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
253 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
254
6c1153e0 255 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
646a4da5 256
6c1153e0 257 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
646a4da5
ML
258 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
259 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
260 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
261 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
262 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
263
6c1153e0 264 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
646a4da5 265
6c1153e0 266 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
c5d3e45a
JG
267 EDMA_ERR_OVERRUN_5 = (1 << 5),
268 EDMA_ERR_UNDERRUN_5 = (1 << 6),
646a4da5
ML
269
270 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
271 EDMA_ERR_LNK_CTRL_RX_1 |
272 EDMA_ERR_LNK_CTRL_RX_3 |
273 EDMA_ERR_LNK_CTRL_TX,
274
bdd4ddde
JG
275 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
276 EDMA_ERR_PRD_PAR |
277 EDMA_ERR_DEV_DCON |
278 EDMA_ERR_DEV_CON |
279 EDMA_ERR_SERR |
280 EDMA_ERR_SELF_DIS |
6c1153e0 281 EDMA_ERR_CRQB_PAR |
bdd4ddde
JG
282 EDMA_ERR_CRPB_PAR |
283 EDMA_ERR_INTRL_PAR |
284 EDMA_ERR_IORDY |
285 EDMA_ERR_LNK_CTRL_RX_2 |
286 EDMA_ERR_LNK_DATA_RX |
287 EDMA_ERR_LNK_DATA_TX |
288 EDMA_ERR_TRANS_PROTO,
289 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
290 EDMA_ERR_PRD_PAR |
291 EDMA_ERR_DEV_DCON |
292 EDMA_ERR_DEV_CON |
293 EDMA_ERR_OVERRUN_5 |
294 EDMA_ERR_UNDERRUN_5 |
295 EDMA_ERR_SELF_DIS_5 |
6c1153e0 296 EDMA_ERR_CRQB_PAR |
bdd4ddde
JG
297 EDMA_ERR_CRPB_PAR |
298 EDMA_ERR_INTRL_PAR |
299 EDMA_ERR_IORDY,
20f733e7 300
31961943
BR
301 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
302 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
31961943
BR
303
304 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
305 EDMA_REQ_Q_PTR_SHIFT = 5,
306
307 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
308 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
309 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
31961943
BR
310 EDMA_RSP_Q_PTR_SHIFT = 3,
311
0ea9e179
JG
312 EDMA_CMD_OFS = 0x28, /* EDMA command register */
313 EDMA_EN = (1 << 0), /* enable EDMA */
314 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
315 ATA_RST = (1 << 2), /* reset trans/link/phy */
20f733e7 316
c9d39130 317 EDMA_IORDY_TMOUT = 0x34,
bca1c4eb 318 EDMA_ARB_CFG = 0x38,
bca1c4eb 319
31961943
BR
320 /* Host private flags (hp_flags) */
321 MV_HP_FLAG_MSI = (1 << 0),
47c2b677
JG
322 MV_HP_ERRATA_50XXB0 = (1 << 1),
323 MV_HP_ERRATA_50XXB2 = (1 << 2),
324 MV_HP_ERRATA_60X1B2 = (1 << 3),
325 MV_HP_ERRATA_60X1C0 = (1 << 4),
e4e7b892 326 MV_HP_ERRATA_XX42A0 = (1 << 5),
0ea9e179
JG
327 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
328 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
329 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
02a121da 330 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
20f733e7 331
31961943 332 /* Port private flags (pp_flags) */
0ea9e179 333 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
72109168 334 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
0ea9e179 335 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
20f733e7
BR
336};
337
ee9ccdf7
JG
338#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
339#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
e4e7b892 340#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
bca1c4eb 341
095fec88 342enum {
baf14aa1
JG
343 /* DMA boundary 0xffff is required by the s/g splitting
344 * we need on /length/ in mv_fill-sg().
345 */
346 MV_DMA_BOUNDARY = 0xffffU,
095fec88 347
0ea9e179
JG
348 /* mask of register bits containing lower 32 bits
349 * of EDMA request queue DMA address
350 */
095fec88
JG
351 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
352
0ea9e179 353 /* ditto, for response queue */
095fec88
JG
354 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
355};
356
522479fb
JG
357enum chip_type {
358 chip_504x,
359 chip_508x,
360 chip_5080,
361 chip_604x,
362 chip_608x,
e4e7b892
JG
363 chip_6042,
364 chip_7042,
522479fb
JG
365};
366
31961943
BR
367/* Command ReQuest Block: 32B */
368struct mv_crqb {
e1469874
ML
369 __le32 sg_addr;
370 __le32 sg_addr_hi;
371 __le16 ctrl_flags;
372 __le16 ata_cmd[11];
31961943 373};
20f733e7 374
e4e7b892 375struct mv_crqb_iie {
e1469874
ML
376 __le32 addr;
377 __le32 addr_hi;
378 __le32 flags;
379 __le32 len;
380 __le32 ata_cmd[4];
e4e7b892
JG
381};
382
31961943
BR
383/* Command ResPonse Block: 8B */
384struct mv_crpb {
e1469874
ML
385 __le16 id;
386 __le16 flags;
387 __le32 tmstmp;
20f733e7
BR
388};
389
31961943
BR
390/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
391struct mv_sg {
e1469874
ML
392 __le32 addr;
393 __le32 flags_size;
394 __le32 addr_hi;
395 __le32 reserved;
31961943 396};
20f733e7 397
31961943
BR
398struct mv_port_priv {
399 struct mv_crqb *crqb;
400 dma_addr_t crqb_dma;
401 struct mv_crpb *crpb;
402 dma_addr_t crpb_dma;
403 struct mv_sg *sg_tbl;
404 dma_addr_t sg_tbl_dma;
bdd4ddde
JG
405
406 unsigned int req_idx;
407 unsigned int resp_idx;
408
31961943
BR
409 u32 pp_flags;
410};
411
bca1c4eb
JG
412struct mv_port_signal {
413 u32 amps;
414 u32 pre;
415};
416
02a121da
ML
417struct mv_host_priv {
418 u32 hp_flags;
419 struct mv_port_signal signal[8];
420 const struct mv_hw_ops *ops;
421 u32 irq_cause_ofs;
422 u32 irq_mask_ofs;
423 u32 unmask_all_irqs;
424};
425
47c2b677 426struct mv_hw_ops {
2a47ce06
JG
427 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
428 unsigned int port);
47c2b677
JG
429 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
430 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
431 void __iomem *mmio);
c9d39130
JG
432 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
433 unsigned int n_hc);
522479fb
JG
434 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
435 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
47c2b677
JG
436};
437
20f733e7 438static void mv_irq_clear(struct ata_port *ap);
da3dbb17
TH
439static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
440static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
441static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
442static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
31961943
BR
443static int mv_port_start(struct ata_port *ap);
444static void mv_port_stop(struct ata_port *ap);
445static void mv_qc_prep(struct ata_queued_cmd *qc);
e4e7b892 446static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
9a3d9eb0 447static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
bdd4ddde
JG
448static void mv_error_handler(struct ata_port *ap);
449static void mv_post_int_cmd(struct ata_queued_cmd *qc);
450static void mv_eh_freeze(struct ata_port *ap);
451static void mv_eh_thaw(struct ata_port *ap);
20f733e7
BR
452static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
453
2a47ce06
JG
454static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
455 unsigned int port);
47c2b677
JG
456static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
457static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
458 void __iomem *mmio);
c9d39130
JG
459static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
460 unsigned int n_hc);
522479fb
JG
461static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
462static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
47c2b677 463
2a47ce06
JG
464static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
465 unsigned int port);
47c2b677
JG
466static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
467static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
468 void __iomem *mmio);
c9d39130
JG
469static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
470 unsigned int n_hc);
522479fb
JG
471static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
472static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
c9d39130
JG
473static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
474 unsigned int port_no);
72109168
ML
475static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
476 void __iomem *port_mmio, int want_ncq);
477static int __mv_stop_dma(struct ata_port *ap);
47c2b677 478
c5d3e45a
JG
479static struct scsi_host_template mv5_sht = {
480 .module = THIS_MODULE,
481 .name = DRV_NAME,
482 .ioctl = ata_scsi_ioctl,
483 .queuecommand = ata_scsi_queuecmd,
484 .can_queue = ATA_DEF_QUEUE,
485 .this_id = ATA_SHT_THIS_ID,
baf14aa1 486 .sg_tablesize = MV_MAX_SG_CT / 2,
c5d3e45a
JG
487 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
488 .emulated = ATA_SHT_EMULATED,
489 .use_clustering = 1,
490 .proc_name = DRV_NAME,
491 .dma_boundary = MV_DMA_BOUNDARY,
3be6cbd7 492 .slave_configure = ata_scsi_slave_config,
c5d3e45a
JG
493 .slave_destroy = ata_scsi_slave_destroy,
494 .bios_param = ata_std_bios_param,
495};
496
497static struct scsi_host_template mv6_sht = {
20f733e7
BR
498 .module = THIS_MODULE,
499 .name = DRV_NAME,
500 .ioctl = ata_scsi_ioctl,
501 .queuecommand = ata_scsi_queuecmd,
c5d3e45a 502 .can_queue = ATA_DEF_QUEUE,
20f733e7 503 .this_id = ATA_SHT_THIS_ID,
baf14aa1 504 .sg_tablesize = MV_MAX_SG_CT / 2,
20f733e7
BR
505 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
506 .emulated = ATA_SHT_EMULATED,
d88184fb 507 .use_clustering = 1,
20f733e7
BR
508 .proc_name = DRV_NAME,
509 .dma_boundary = MV_DMA_BOUNDARY,
3be6cbd7 510 .slave_configure = ata_scsi_slave_config,
ccf68c34 511 .slave_destroy = ata_scsi_slave_destroy,
20f733e7 512 .bios_param = ata_std_bios_param,
20f733e7
BR
513};
514
c9d39130 515static const struct ata_port_operations mv5_ops = {
c9d39130
JG
516 .tf_load = ata_tf_load,
517 .tf_read = ata_tf_read,
518 .check_status = ata_check_status,
519 .exec_command = ata_exec_command,
520 .dev_select = ata_std_dev_select,
521
cffacd85 522 .cable_detect = ata_cable_sata,
c9d39130
JG
523
524 .qc_prep = mv_qc_prep,
525 .qc_issue = mv_qc_issue,
0d5ff566 526 .data_xfer = ata_data_xfer,
c9d39130 527
c9d39130 528 .irq_clear = mv_irq_clear,
246ce3b6 529 .irq_on = ata_irq_on,
c9d39130 530
bdd4ddde
JG
531 .error_handler = mv_error_handler,
532 .post_internal_cmd = mv_post_int_cmd,
533 .freeze = mv_eh_freeze,
534 .thaw = mv_eh_thaw,
535
c9d39130
JG
536 .scr_read = mv5_scr_read,
537 .scr_write = mv5_scr_write,
538
539 .port_start = mv_port_start,
540 .port_stop = mv_port_stop,
c9d39130
JG
541};
542
543static const struct ata_port_operations mv6_ops = {
20f733e7
BR
544 .tf_load = ata_tf_load,
545 .tf_read = ata_tf_read,
546 .check_status = ata_check_status,
547 .exec_command = ata_exec_command,
548 .dev_select = ata_std_dev_select,
549
cffacd85 550 .cable_detect = ata_cable_sata,
20f733e7 551
31961943
BR
552 .qc_prep = mv_qc_prep,
553 .qc_issue = mv_qc_issue,
0d5ff566 554 .data_xfer = ata_data_xfer,
20f733e7 555
20f733e7 556 .irq_clear = mv_irq_clear,
246ce3b6 557 .irq_on = ata_irq_on,
20f733e7 558
bdd4ddde
JG
559 .error_handler = mv_error_handler,
560 .post_internal_cmd = mv_post_int_cmd,
561 .freeze = mv_eh_freeze,
562 .thaw = mv_eh_thaw,
563
20f733e7
BR
564 .scr_read = mv_scr_read,
565 .scr_write = mv_scr_write,
566
31961943
BR
567 .port_start = mv_port_start,
568 .port_stop = mv_port_stop,
20f733e7
BR
569};
570
e4e7b892 571static const struct ata_port_operations mv_iie_ops = {
e4e7b892
JG
572 .tf_load = ata_tf_load,
573 .tf_read = ata_tf_read,
574 .check_status = ata_check_status,
575 .exec_command = ata_exec_command,
576 .dev_select = ata_std_dev_select,
577
cffacd85 578 .cable_detect = ata_cable_sata,
e4e7b892
JG
579
580 .qc_prep = mv_qc_prep_iie,
581 .qc_issue = mv_qc_issue,
0d5ff566 582 .data_xfer = ata_data_xfer,
e4e7b892 583
e4e7b892 584 .irq_clear = mv_irq_clear,
246ce3b6 585 .irq_on = ata_irq_on,
e4e7b892 586
bdd4ddde
JG
587 .error_handler = mv_error_handler,
588 .post_internal_cmd = mv_post_int_cmd,
589 .freeze = mv_eh_freeze,
590 .thaw = mv_eh_thaw,
591
e4e7b892
JG
592 .scr_read = mv_scr_read,
593 .scr_write = mv_scr_write,
594
595 .port_start = mv_port_start,
596 .port_stop = mv_port_stop,
e4e7b892
JG
597};
598
98ac62de 599static const struct ata_port_info mv_port_info[] = {
20f733e7 600 { /* chip_504x */
cca3974e 601 .flags = MV_COMMON_FLAGS,
31961943 602 .pio_mask = 0x1f, /* pio0-4 */
bf6263a8 603 .udma_mask = ATA_UDMA6,
c9d39130 604 .port_ops = &mv5_ops,
20f733e7
BR
605 },
606 { /* chip_508x */
c5d3e45a 607 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
31961943 608 .pio_mask = 0x1f, /* pio0-4 */
bf6263a8 609 .udma_mask = ATA_UDMA6,
c9d39130 610 .port_ops = &mv5_ops,
20f733e7 611 },
47c2b677 612 { /* chip_5080 */
c5d3e45a 613 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
47c2b677 614 .pio_mask = 0x1f, /* pio0-4 */
bf6263a8 615 .udma_mask = ATA_UDMA6,
c9d39130 616 .port_ops = &mv5_ops,
47c2b677 617 },
20f733e7 618 { /* chip_604x */
c5d3e45a 619 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
31961943 620 .pio_mask = 0x1f, /* pio0-4 */
bf6263a8 621 .udma_mask = ATA_UDMA6,
c9d39130 622 .port_ops = &mv6_ops,
20f733e7
BR
623 },
624 { /* chip_608x */
c5d3e45a
JG
625 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
626 MV_FLAG_DUAL_HC,
31961943 627 .pio_mask = 0x1f, /* pio0-4 */
bf6263a8 628 .udma_mask = ATA_UDMA6,
c9d39130 629 .port_ops = &mv6_ops,
20f733e7 630 },
e4e7b892 631 { /* chip_6042 */
c5d3e45a 632 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
e4e7b892 633 .pio_mask = 0x1f, /* pio0-4 */
bf6263a8 634 .udma_mask = ATA_UDMA6,
e4e7b892
JG
635 .port_ops = &mv_iie_ops,
636 },
637 { /* chip_7042 */
c5d3e45a 638 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
e4e7b892 639 .pio_mask = 0x1f, /* pio0-4 */
bf6263a8 640 .udma_mask = ATA_UDMA6,
e4e7b892
JG
641 .port_ops = &mv_iie_ops,
642 },
20f733e7
BR
643};
644
3b7d697d 645static const struct pci_device_id mv_pci_tbl[] = {
2d2744fc
JG
646 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
647 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
648 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
649 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
cfbf723e
AC
650 /* RocketRAID 1740/174x have different identifiers */
651 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
652 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
2d2744fc
JG
653
654 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
655 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
656 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
657 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
658 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
659
660 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
661
d9f9c6bc
FA
662 /* Adaptec 1430SA */
663 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
664
02a121da 665 /* Marvell 7042 support */
6a3d586d
MT
666 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
667
02a121da
ML
668 /* Highpoint RocketRAID PCIe series */
669 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
670 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
671
2d2744fc 672 { } /* terminate list */
20f733e7
BR
673};
674
675static struct pci_driver mv_pci_driver = {
676 .name = DRV_NAME,
677 .id_table = mv_pci_tbl,
678 .probe = mv_init_one,
679 .remove = ata_pci_remove_one,
680};
681
47c2b677
JG
682static const struct mv_hw_ops mv5xxx_ops = {
683 .phy_errata = mv5_phy_errata,
684 .enable_leds = mv5_enable_leds,
685 .read_preamp = mv5_read_preamp,
686 .reset_hc = mv5_reset_hc,
522479fb
JG
687 .reset_flash = mv5_reset_flash,
688 .reset_bus = mv5_reset_bus,
47c2b677
JG
689};
690
691static const struct mv_hw_ops mv6xxx_ops = {
692 .phy_errata = mv6_phy_errata,
693 .enable_leds = mv6_enable_leds,
694 .read_preamp = mv6_read_preamp,
695 .reset_hc = mv6_reset_hc,
522479fb
JG
696 .reset_flash = mv6_reset_flash,
697 .reset_bus = mv_reset_pci_bus,
47c2b677
JG
698};
699
ddef9bb3
JG
700/*
701 * module options
702 */
703static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
704
705
d88184fb
JG
706/* move to PCI layer or libata core? */
707static int pci_go_64(struct pci_dev *pdev)
708{
709 int rc;
710
711 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
712 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
713 if (rc) {
714 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
715 if (rc) {
716 dev_printk(KERN_ERR, &pdev->dev,
717 "64-bit DMA enable failed\n");
718 return rc;
719 }
720 }
721 } else {
722 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
723 if (rc) {
724 dev_printk(KERN_ERR, &pdev->dev,
725 "32-bit DMA enable failed\n");
726 return rc;
727 }
728 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
729 if (rc) {
730 dev_printk(KERN_ERR, &pdev->dev,
731 "32-bit consistent DMA enable failed\n");
732 return rc;
733 }
734 }
735
736 return rc;
737}
738
20f733e7
BR
739/*
740 * Functions
741 */
742
743static inline void writelfl(unsigned long data, void __iomem *addr)
744{
745 writel(data, addr);
746 (void) readl(addr); /* flush to avoid PCI posted write */
747}
748
20f733e7
BR
749static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
750{
751 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
752}
753
c9d39130
JG
754static inline unsigned int mv_hc_from_port(unsigned int port)
755{
756 return port >> MV_PORT_HC_SHIFT;
757}
758
759static inline unsigned int mv_hardport_from_port(unsigned int port)
760{
761 return port & MV_PORT_MASK;
762}
763
764static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
765 unsigned int port)
766{
767 return mv_hc_base(base, mv_hc_from_port(port));
768}
769
20f733e7
BR
770static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
771{
c9d39130 772 return mv_hc_base_from_port(base, port) +
8b260248 773 MV_SATAHC_ARBTR_REG_SZ +
c9d39130 774 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
20f733e7
BR
775}
776
777static inline void __iomem *mv_ap_base(struct ata_port *ap)
778{
0d5ff566 779 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
20f733e7
BR
780}
781
cca3974e 782static inline int mv_get_hc_count(unsigned long port_flags)
31961943 783{
cca3974e 784 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
31961943
BR
785}
786
787static void mv_irq_clear(struct ata_port *ap)
20f733e7 788{
20f733e7
BR
789}
790
c5d3e45a
JG
791static void mv_set_edma_ptrs(void __iomem *port_mmio,
792 struct mv_host_priv *hpriv,
793 struct mv_port_priv *pp)
794{
bdd4ddde
JG
795 u32 index;
796
c5d3e45a
JG
797 /*
798 * initialize request queue
799 */
bdd4ddde
JG
800 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
801
c5d3e45a
JG
802 WARN_ON(pp->crqb_dma & 0x3ff);
803 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
bdd4ddde 804 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
c5d3e45a
JG
805 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
806
807 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
bdd4ddde 808 writelfl((pp->crqb_dma & 0xffffffff) | index,
c5d3e45a
JG
809 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
810 else
bdd4ddde 811 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
c5d3e45a
JG
812
813 /*
814 * initialize response queue
815 */
bdd4ddde
JG
816 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
817
c5d3e45a
JG
818 WARN_ON(pp->crpb_dma & 0xff);
819 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
820
821 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
bdd4ddde 822 writelfl((pp->crpb_dma & 0xffffffff) | index,
c5d3e45a
JG
823 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
824 else
bdd4ddde 825 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
c5d3e45a 826
bdd4ddde 827 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
c5d3e45a 828 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
c5d3e45a
JG
829}
830
05b308e1
BR
831/**
832 * mv_start_dma - Enable eDMA engine
833 * @base: port base address
834 * @pp: port private data
835 *
beec7dbc
TH
836 * Verify the local cache of the eDMA state is accurate with a
837 * WARN_ON.
05b308e1
BR
838 *
839 * LOCKING:
840 * Inherited from caller.
841 */
0c58912e 842static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
72109168 843 struct mv_port_priv *pp, u8 protocol)
20f733e7 844{
72109168
ML
845 int want_ncq = (protocol == ATA_PROT_NCQ);
846
847 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
848 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
849 if (want_ncq != using_ncq)
850 __mv_stop_dma(ap);
851 }
c5d3e45a 852 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
0c58912e
ML
853 struct mv_host_priv *hpriv = ap->host->private_data;
854 int hard_port = mv_hardport_from_port(ap->port_no);
855 void __iomem *hc_mmio = mv_hc_base_from_port(
856 ap->host->iomap[MV_PRIMARY_BAR], hard_port);
857 u32 hc_irq_cause, ipending;
858
bdd4ddde 859 /* clear EDMA event indicators, if any */
f630d562 860 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
bdd4ddde 861
0c58912e
ML
862 /* clear EDMA interrupt indicator, if any */
863 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
864 ipending = (DEV_IRQ << hard_port) |
865 (CRPB_DMA_DONE << hard_port);
866 if (hc_irq_cause & ipending) {
867 writelfl(hc_irq_cause & ~ipending,
868 hc_mmio + HC_IRQ_CAUSE_OFS);
869 }
870
72109168 871 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
0c58912e
ML
872
873 /* clear FIS IRQ Cause */
874 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
875
f630d562 876 mv_set_edma_ptrs(port_mmio, hpriv, pp);
bdd4ddde 877
f630d562 878 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
afb0edd9
BR
879 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
880 }
f630d562 881 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
20f733e7
BR
882}
883
05b308e1 884/**
0ea9e179 885 * __mv_stop_dma - Disable eDMA engine
05b308e1
BR
886 * @ap: ATA channel to manipulate
887 *
beec7dbc
TH
888 * Verify the local cache of the eDMA state is accurate with a
889 * WARN_ON.
05b308e1
BR
890 *
891 * LOCKING:
892 * Inherited from caller.
893 */
0ea9e179 894static int __mv_stop_dma(struct ata_port *ap)
20f733e7 895{
31961943
BR
896 void __iomem *port_mmio = mv_ap_base(ap);
897 struct mv_port_priv *pp = ap->private_data;
31961943 898 u32 reg;
c5d3e45a 899 int i, err = 0;
31961943 900
4537deb5 901 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
afb0edd9 902 /* Disable EDMA if active. The disable bit auto clears.
31961943 903 */
31961943
BR
904 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
905 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
afb0edd9 906 } else {
beec7dbc 907 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
2dcb407e 908 }
8b260248 909
31961943
BR
910 /* now properly wait for the eDMA to stop */
911 for (i = 1000; i > 0; i--) {
912 reg = readl(port_mmio + EDMA_CMD_OFS);
4537deb5 913 if (!(reg & EDMA_EN))
31961943 914 break;
4537deb5 915
31961943
BR
916 udelay(100);
917 }
918
c5d3e45a 919 if (reg & EDMA_EN) {
f15a1daf 920 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
c5d3e45a 921 err = -EIO;
31961943 922 }
c5d3e45a
JG
923
924 return err;
20f733e7
BR
925}
926
0ea9e179
JG
927static int mv_stop_dma(struct ata_port *ap)
928{
929 unsigned long flags;
930 int rc;
931
932 spin_lock_irqsave(&ap->host->lock, flags);
933 rc = __mv_stop_dma(ap);
934 spin_unlock_irqrestore(&ap->host->lock, flags);
935
936 return rc;
937}
938
8a70f8dc 939#ifdef ATA_DEBUG
31961943 940static void mv_dump_mem(void __iomem *start, unsigned bytes)
20f733e7 941{
31961943
BR
942 int b, w;
943 for (b = 0; b < bytes; ) {
944 DPRINTK("%p: ", start + b);
945 for (w = 0; b < bytes && w < 4; w++) {
2dcb407e 946 printk("%08x ", readl(start + b));
31961943
BR
947 b += sizeof(u32);
948 }
949 printk("\n");
950 }
31961943 951}
8a70f8dc
JG
952#endif
953
31961943
BR
954static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
955{
956#ifdef ATA_DEBUG
957 int b, w;
958 u32 dw;
959 for (b = 0; b < bytes; ) {
960 DPRINTK("%02x: ", b);
961 for (w = 0; b < bytes && w < 4; w++) {
2dcb407e
JG
962 (void) pci_read_config_dword(pdev, b, &dw);
963 printk("%08x ", dw);
31961943
BR
964 b += sizeof(u32);
965 }
966 printk("\n");
967 }
968#endif
969}
970static void mv_dump_all_regs(void __iomem *mmio_base, int port,
971 struct pci_dev *pdev)
972{
973#ifdef ATA_DEBUG
8b260248 974 void __iomem *hc_base = mv_hc_base(mmio_base,
31961943
BR
975 port >> MV_PORT_HC_SHIFT);
976 void __iomem *port_base;
977 int start_port, num_ports, p, start_hc, num_hcs, hc;
978
979 if (0 > port) {
980 start_hc = start_port = 0;
981 num_ports = 8; /* shld be benign for 4 port devs */
982 num_hcs = 2;
983 } else {
984 start_hc = port >> MV_PORT_HC_SHIFT;
985 start_port = port;
986 num_ports = num_hcs = 1;
987 }
8b260248 988 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
31961943
BR
989 num_ports > 1 ? num_ports - 1 : start_port);
990
991 if (NULL != pdev) {
992 DPRINTK("PCI config space regs:\n");
993 mv_dump_pci_cfg(pdev, 0x68);
994 }
995 DPRINTK("PCI regs:\n");
996 mv_dump_mem(mmio_base+0xc00, 0x3c);
997 mv_dump_mem(mmio_base+0xd00, 0x34);
998 mv_dump_mem(mmio_base+0xf00, 0x4);
999 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1000 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
d220c37e 1001 hc_base = mv_hc_base(mmio_base, hc);
31961943
BR
1002 DPRINTK("HC regs (HC %i):\n", hc);
1003 mv_dump_mem(hc_base, 0x1c);
1004 }
1005 for (p = start_port; p < start_port + num_ports; p++) {
1006 port_base = mv_port_base(mmio_base, p);
2dcb407e 1007 DPRINTK("EDMA regs (port %i):\n", p);
31961943 1008 mv_dump_mem(port_base, 0x54);
2dcb407e 1009 DPRINTK("SATA regs (port %i):\n", p);
31961943
BR
1010 mv_dump_mem(port_base+0x300, 0x60);
1011 }
1012#endif
20f733e7
BR
1013}
1014
1015static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1016{
1017 unsigned int ofs;
1018
1019 switch (sc_reg_in) {
1020 case SCR_STATUS:
1021 case SCR_CONTROL:
1022 case SCR_ERROR:
1023 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1024 break;
1025 case SCR_ACTIVE:
1026 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1027 break;
1028 default:
1029 ofs = 0xffffffffU;
1030 break;
1031 }
1032 return ofs;
1033}
1034
da3dbb17 1035static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
20f733e7
BR
1036{
1037 unsigned int ofs = mv_scr_offset(sc_reg_in);
1038
da3dbb17
TH
1039 if (ofs != 0xffffffffU) {
1040 *val = readl(mv_ap_base(ap) + ofs);
1041 return 0;
1042 } else
1043 return -EINVAL;
20f733e7
BR
1044}
1045
da3dbb17 1046static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
20f733e7
BR
1047{
1048 unsigned int ofs = mv_scr_offset(sc_reg_in);
1049
da3dbb17 1050 if (ofs != 0xffffffffU) {
20f733e7 1051 writelfl(val, mv_ap_base(ap) + ofs);
da3dbb17
TH
1052 return 0;
1053 } else
1054 return -EINVAL;
20f733e7
BR
1055}
1056
72109168
ML
1057static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1058 void __iomem *port_mmio, int want_ncq)
e4e7b892 1059{
0c58912e 1060 u32 cfg;
e4e7b892
JG
1061
1062 /* set up non-NCQ EDMA configuration */
0c58912e 1063 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
e4e7b892 1064
0c58912e 1065 if (IS_GEN_I(hpriv))
e4e7b892
JG
1066 cfg |= (1 << 8); /* enab config burst size mask */
1067
0c58912e 1068 else if (IS_GEN_II(hpriv))
e4e7b892
JG
1069 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1070
1071 else if (IS_GEN_IIE(hpriv)) {
e728eabe
JG
1072 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1073 cfg |= (1 << 22); /* enab 4-entry host queue cache */
e4e7b892 1074 cfg |= (1 << 18); /* enab early completion */
e728eabe 1075 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
e4e7b892
JG
1076 }
1077
72109168
ML
1078 if (want_ncq) {
1079 cfg |= EDMA_CFG_NCQ;
1080 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1081 } else
1082 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1083
e4e7b892
JG
1084 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1085}
1086
05b308e1
BR
1087/**
1088 * mv_port_start - Port specific init/start routine.
1089 * @ap: ATA channel to manipulate
1090 *
1091 * Allocate and point to DMA memory, init port private memory,
1092 * zero indices.
1093 *
1094 * LOCKING:
1095 * Inherited from caller.
1096 */
31961943
BR
1097static int mv_port_start(struct ata_port *ap)
1098{
cca3974e
JG
1099 struct device *dev = ap->host->dev;
1100 struct mv_host_priv *hpriv = ap->host->private_data;
31961943
BR
1101 struct mv_port_priv *pp;
1102 void __iomem *port_mmio = mv_ap_base(ap);
1103 void *mem;
1104 dma_addr_t mem_dma;
0ea9e179 1105 unsigned long flags;
24dc5f33 1106 int rc;
31961943 1107
24dc5f33 1108 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
6037d6bb 1109 if (!pp)
24dc5f33 1110 return -ENOMEM;
31961943 1111
24dc5f33
TH
1112 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1113 GFP_KERNEL);
6037d6bb 1114 if (!mem)
24dc5f33 1115 return -ENOMEM;
31961943
BR
1116 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1117
6037d6bb
JG
1118 rc = ata_pad_alloc(ap, dev);
1119 if (rc)
24dc5f33 1120 return rc;
6037d6bb 1121
8b260248 1122 /* First item in chunk of DMA memory:
31961943
BR
1123 * 32-slot command request table (CRQB), 32 bytes each in size
1124 */
1125 pp->crqb = mem;
1126 pp->crqb_dma = mem_dma;
1127 mem += MV_CRQB_Q_SZ;
1128 mem_dma += MV_CRQB_Q_SZ;
1129
8b260248 1130 /* Second item:
31961943
BR
1131 * 32-slot command response table (CRPB), 8 bytes each in size
1132 */
1133 pp->crpb = mem;
1134 pp->crpb_dma = mem_dma;
1135 mem += MV_CRPB_Q_SZ;
1136 mem_dma += MV_CRPB_Q_SZ;
1137
1138 /* Third item:
1139 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1140 */
1141 pp->sg_tbl = mem;
1142 pp->sg_tbl_dma = mem_dma;
1143
0ea9e179
JG
1144 spin_lock_irqsave(&ap->host->lock, flags);
1145
72109168 1146 mv_edma_cfg(pp, hpriv, port_mmio, 0);
e4e7b892 1147
c5d3e45a 1148 mv_set_edma_ptrs(port_mmio, hpriv, pp);
31961943 1149
0ea9e179
JG
1150 spin_unlock_irqrestore(&ap->host->lock, flags);
1151
31961943
BR
1152 /* Don't turn on EDMA here...do it before DMA commands only. Else
1153 * we'll be unable to send non-data, PIO, etc due to restricted access
1154 * to shadow regs.
1155 */
1156 ap->private_data = pp;
1157 return 0;
1158}
1159
05b308e1
BR
1160/**
1161 * mv_port_stop - Port specific cleanup/stop routine.
1162 * @ap: ATA channel to manipulate
1163 *
1164 * Stop DMA, cleanup port memory.
1165 *
1166 * LOCKING:
cca3974e 1167 * This routine uses the host lock to protect the DMA stop.
05b308e1 1168 */
31961943
BR
1169static void mv_port_stop(struct ata_port *ap)
1170{
31961943 1171 mv_stop_dma(ap);
31961943
BR
1172}
1173
05b308e1
BR
1174/**
1175 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1176 * @qc: queued command whose SG list to source from
1177 *
1178 * Populate the SG list and mark the last entry.
1179 *
1180 * LOCKING:
1181 * Inherited from caller.
1182 */
6c08772e 1183static void mv_fill_sg(struct ata_queued_cmd *qc)
31961943
BR
1184{
1185 struct mv_port_priv *pp = qc->ap->private_data;
972c26bd 1186 struct scatterlist *sg;
3be6cbd7 1187 struct mv_sg *mv_sg, *last_sg = NULL;
ff2aeb1e 1188 unsigned int si;
31961943 1189
d88184fb 1190 mv_sg = pp->sg_tbl;
ff2aeb1e 1191 for_each_sg(qc->sg, sg, qc->n_elem, si) {
d88184fb
JG
1192 dma_addr_t addr = sg_dma_address(sg);
1193 u32 sg_len = sg_dma_len(sg);
22374677 1194
4007b493
OJ
1195 while (sg_len) {
1196 u32 offset = addr & 0xffff;
1197 u32 len = sg_len;
22374677 1198
4007b493
OJ
1199 if ((offset + sg_len > 0x10000))
1200 len = 0x10000 - offset;
1201
1202 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1203 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
6c08772e 1204 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
4007b493
OJ
1205
1206 sg_len -= len;
1207 addr += len;
1208
3be6cbd7 1209 last_sg = mv_sg;
4007b493 1210 mv_sg++;
4007b493 1211 }
31961943 1212 }
3be6cbd7
JG
1213
1214 if (likely(last_sg))
1215 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
31961943
BR
1216}
1217
5796d1c4 1218static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
31961943 1219{
559eedad 1220 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
31961943 1221 (last ? CRQB_CMD_LAST : 0);
559eedad 1222 *cmdw = cpu_to_le16(tmp);
31961943
BR
1223}
1224
05b308e1
BR
1225/**
1226 * mv_qc_prep - Host specific command preparation.
1227 * @qc: queued command to prepare
1228 *
1229 * This routine simply redirects to the general purpose routine
1230 * if command is not DMA. Else, it handles prep of the CRQB
1231 * (command request block), does some sanity checking, and calls
1232 * the SG load routine.
1233 *
1234 * LOCKING:
1235 * Inherited from caller.
1236 */
31961943
BR
1237static void mv_qc_prep(struct ata_queued_cmd *qc)
1238{
1239 struct ata_port *ap = qc->ap;
1240 struct mv_port_priv *pp = ap->private_data;
e1469874 1241 __le16 *cw;
31961943
BR
1242 struct ata_taskfile *tf;
1243 u16 flags = 0;
a6432436 1244 unsigned in_index;
31961943 1245
2dcb407e 1246 if (qc->tf.protocol != ATA_PROT_DMA)
31961943 1247 return;
20f733e7 1248
31961943
BR
1249 /* Fill in command request block
1250 */
e4e7b892 1251 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
31961943 1252 flags |= CRQB_FLAG_READ;
beec7dbc 1253 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
31961943 1254 flags |= qc->tag << CRQB_TAG_SHIFT;
4537deb5 1255 flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
31961943 1256
bdd4ddde
JG
1257 /* get current queue index from software */
1258 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
a6432436
ML
1259
1260 pp->crqb[in_index].sg_addr =
31961943 1261 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
a6432436 1262 pp->crqb[in_index].sg_addr_hi =
31961943 1263 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
a6432436 1264 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
31961943 1265
a6432436 1266 cw = &pp->crqb[in_index].ata_cmd[0];
31961943
BR
1267 tf = &qc->tf;
1268
1269 /* Sadly, the CRQB cannot accomodate all registers--there are
1270 * only 11 bytes...so we must pick and choose required
1271 * registers based on the command. So, we drop feature and
1272 * hob_feature for [RW] DMA commands, but they are needed for
1273 * NCQ. NCQ will drop hob_nsect.
20f733e7 1274 */
31961943
BR
1275 switch (tf->command) {
1276 case ATA_CMD_READ:
1277 case ATA_CMD_READ_EXT:
1278 case ATA_CMD_WRITE:
1279 case ATA_CMD_WRITE_EXT:
c15d85c8 1280 case ATA_CMD_WRITE_FUA_EXT:
31961943
BR
1281 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1282 break;
1283#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1284 case ATA_CMD_FPDMA_READ:
1285 case ATA_CMD_FPDMA_WRITE:
8b260248 1286 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
31961943
BR
1287 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1288 break;
1289#endif /* FIXME: remove this line when NCQ added */
1290 default:
1291 /* The only other commands EDMA supports in non-queued and
1292 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1293 * of which are defined/used by Linux. If we get here, this
1294 * driver needs work.
1295 *
1296 * FIXME: modify libata to give qc_prep a return value and
1297 * return error here.
1298 */
1299 BUG_ON(tf->command);
1300 break;
1301 }
1302 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1303 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1304 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1305 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1306 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1307 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1308 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1309 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1310 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1311
e4e7b892
JG
1312 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1313 return;
1314 mv_fill_sg(qc);
1315}
1316
1317/**
1318 * mv_qc_prep_iie - Host specific command preparation.
1319 * @qc: queued command to prepare
1320 *
1321 * This routine simply redirects to the general purpose routine
1322 * if command is not DMA. Else, it handles prep of the CRQB
1323 * (command request block), does some sanity checking, and calls
1324 * the SG load routine.
1325 *
1326 * LOCKING:
1327 * Inherited from caller.
1328 */
1329static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1330{
1331 struct ata_port *ap = qc->ap;
1332 struct mv_port_priv *pp = ap->private_data;
1333 struct mv_crqb_iie *crqb;
1334 struct ata_taskfile *tf;
a6432436 1335 unsigned in_index;
e4e7b892
JG
1336 u32 flags = 0;
1337
2dcb407e 1338 if (qc->tf.protocol != ATA_PROT_DMA)
e4e7b892
JG
1339 return;
1340
e4e7b892
JG
1341 /* Fill in Gen IIE command request block
1342 */
1343 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1344 flags |= CRQB_FLAG_READ;
1345
beec7dbc 1346 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
e4e7b892 1347 flags |= qc->tag << CRQB_TAG_SHIFT;
bdd4ddde 1348 flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
4537deb5 1349 what we use as our tag */
e4e7b892 1350
bdd4ddde
JG
1351 /* get current queue index from software */
1352 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
a6432436
ML
1353
1354 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
e4e7b892
JG
1355 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1356 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1357 crqb->flags = cpu_to_le32(flags);
1358
1359 tf = &qc->tf;
1360 crqb->ata_cmd[0] = cpu_to_le32(
1361 (tf->command << 16) |
1362 (tf->feature << 24)
1363 );
1364 crqb->ata_cmd[1] = cpu_to_le32(
1365 (tf->lbal << 0) |
1366 (tf->lbam << 8) |
1367 (tf->lbah << 16) |
1368 (tf->device << 24)
1369 );
1370 crqb->ata_cmd[2] = cpu_to_le32(
1371 (tf->hob_lbal << 0) |
1372 (tf->hob_lbam << 8) |
1373 (tf->hob_lbah << 16) |
1374 (tf->hob_feature << 24)
1375 );
1376 crqb->ata_cmd[3] = cpu_to_le32(
1377 (tf->nsect << 0) |
1378 (tf->hob_nsect << 8)
1379 );
1380
1381 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
31961943 1382 return;
31961943
BR
1383 mv_fill_sg(qc);
1384}
1385
05b308e1
BR
1386/**
1387 * mv_qc_issue - Initiate a command to the host
1388 * @qc: queued command to start
1389 *
1390 * This routine simply redirects to the general purpose routine
1391 * if command is not DMA. Else, it sanity checks our local
1392 * caches of the request producer/consumer indices then enables
1393 * DMA and bumps the request producer index.
1394 *
1395 * LOCKING:
1396 * Inherited from caller.
1397 */
9a3d9eb0 1398static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
31961943 1399{
c5d3e45a
JG
1400 struct ata_port *ap = qc->ap;
1401 void __iomem *port_mmio = mv_ap_base(ap);
1402 struct mv_port_priv *pp = ap->private_data;
bdd4ddde 1403 u32 in_index;
31961943 1404
c5d3e45a 1405 if (qc->tf.protocol != ATA_PROT_DMA) {
31961943
BR
1406 /* We're about to send a non-EDMA capable command to the
1407 * port. Turn off EDMA so there won't be problems accessing
1408 * shadow block, etc registers.
1409 */
0ea9e179 1410 __mv_stop_dma(ap);
31961943
BR
1411 return ata_qc_issue_prot(qc);
1412 }
1413
72109168 1414 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
bdd4ddde
JG
1415
1416 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
31961943 1417
31961943 1418 /* until we do queuing, the queue should be empty at this point */
a6432436
ML
1419 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1420 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
31961943 1421
bdd4ddde 1422 pp->req_idx++;
31961943 1423
bdd4ddde 1424 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
31961943
BR
1425
1426 /* and write the request in pointer to kick the EDMA to life */
bdd4ddde
JG
1427 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1428 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
31961943
BR
1429
1430 return 0;
1431}
1432
05b308e1
BR
1433/**
1434 * mv_err_intr - Handle error interrupts on the port
1435 * @ap: ATA channel to manipulate
9b358e30 1436 * @reset_allowed: bool: 0 == don't trigger from reset here
05b308e1
BR
1437 *
1438 * In most cases, just clear the interrupt and move on. However,
1439 * some cases require an eDMA reset, which is done right before
1440 * the COMRESET in mv_phy_reset(). The SERR case requires a
1441 * clear of pending errors in the SATA SERROR register. Finally,
1442 * if the port disabled DMA, update our cached copy to match.
1443 *
1444 * LOCKING:
1445 * Inherited from caller.
1446 */
bdd4ddde 1447static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
31961943
BR
1448{
1449 void __iomem *port_mmio = mv_ap_base(ap);
bdd4ddde
JG
1450 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1451 struct mv_port_priv *pp = ap->private_data;
1452 struct mv_host_priv *hpriv = ap->host->private_data;
1453 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1454 unsigned int action = 0, err_mask = 0;
9af5c9c9 1455 struct ata_eh_info *ehi = &ap->link.eh_info;
20f733e7 1456
bdd4ddde 1457 ata_ehi_clear_desc(ehi);
20f733e7 1458
bdd4ddde
JG
1459 if (!edma_enabled) {
1460 /* just a guess: do we need to do this? should we
1461 * expand this, and do it in all cases?
1462 */
936fd732
TH
1463 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1464 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
20f733e7 1465 }
bdd4ddde
JG
1466
1467 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1468
1469 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1470
1471 /*
1472 * all generations share these EDMA error cause bits
1473 */
1474
1475 if (edma_err_cause & EDMA_ERR_DEV)
1476 err_mask |= AC_ERR_DEV;
1477 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
6c1153e0 1478 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
bdd4ddde
JG
1479 EDMA_ERR_INTRL_PAR)) {
1480 err_mask |= AC_ERR_ATA_BUS;
1481 action |= ATA_EH_HARDRESET;
b64bbc39 1482 ata_ehi_push_desc(ehi, "parity error");
bdd4ddde
JG
1483 }
1484 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1485 ata_ehi_hotplugged(ehi);
1486 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
b64bbc39 1487 "dev disconnect" : "dev connect");
3606a380 1488 action |= ATA_EH_HARDRESET;
bdd4ddde
JG
1489 }
1490
ee9ccdf7 1491 if (IS_GEN_I(hpriv)) {
bdd4ddde
JG
1492 eh_freeze_mask = EDMA_EH_FREEZE_5;
1493
1494 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1495 struct mv_port_priv *pp = ap->private_data;
1496 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 1497 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
1498 }
1499 } else {
1500 eh_freeze_mask = EDMA_EH_FREEZE;
1501
1502 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1503 struct mv_port_priv *pp = ap->private_data;
1504 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 1505 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
1506 }
1507
1508 if (edma_err_cause & EDMA_ERR_SERR) {
936fd732
TH
1509 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1510 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
bdd4ddde
JG
1511 err_mask = AC_ERR_ATA_BUS;
1512 action |= ATA_EH_HARDRESET;
1513 }
afb0edd9 1514 }
20f733e7
BR
1515
1516 /* Clear EDMA now that SERR cleanup done */
3606a380 1517 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
20f733e7 1518
bdd4ddde
JG
1519 if (!err_mask) {
1520 err_mask = AC_ERR_OTHER;
1521 action |= ATA_EH_HARDRESET;
1522 }
1523
1524 ehi->serror |= serr;
1525 ehi->action |= action;
1526
1527 if (qc)
1528 qc->err_mask |= err_mask;
1529 else
1530 ehi->err_mask |= err_mask;
1531
1532 if (edma_err_cause & eh_freeze_mask)
1533 ata_port_freeze(ap);
1534 else
1535 ata_port_abort(ap);
1536}
1537
1538static void mv_intr_pio(struct ata_port *ap)
1539{
1540 struct ata_queued_cmd *qc;
1541 u8 ata_status;
1542
1543 /* ignore spurious intr if drive still BUSY */
1544 ata_status = readb(ap->ioaddr.status_addr);
1545 if (unlikely(ata_status & ATA_BUSY))
1546 return;
1547
1548 /* get active ATA command */
9af5c9c9 1549 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1550 if (unlikely(!qc)) /* no active tag */
1551 return;
1552 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1553 return;
1554
1555 /* and finally, complete the ATA command */
1556 qc->err_mask |= ac_err_mask(ata_status);
1557 ata_qc_complete(qc);
1558}
1559
1560static void mv_intr_edma(struct ata_port *ap)
1561{
1562 void __iomem *port_mmio = mv_ap_base(ap);
1563 struct mv_host_priv *hpriv = ap->host->private_data;
1564 struct mv_port_priv *pp = ap->private_data;
1565 struct ata_queued_cmd *qc;
1566 u32 out_index, in_index;
1567 bool work_done = false;
1568
1569 /* get h/w response queue pointer */
1570 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1571 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1572
1573 while (1) {
1574 u16 status;
6c1153e0 1575 unsigned int tag;
bdd4ddde
JG
1576
1577 /* get s/w response queue last-read pointer, and compare */
1578 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1579 if (in_index == out_index)
1580 break;
1581
bdd4ddde 1582 /* 50xx: get active ATA command */
0ea9e179 1583 if (IS_GEN_I(hpriv))
9af5c9c9 1584 tag = ap->link.active_tag;
bdd4ddde 1585
6c1153e0
JG
1586 /* Gen II/IIE: get active ATA command via tag, to enable
1587 * support for queueing. this works transparently for
1588 * queued and non-queued modes.
bdd4ddde 1589 */
6c1153e0
JG
1590 else if (IS_GEN_II(hpriv))
1591 tag = (le16_to_cpu(pp->crpb[out_index].id)
1592 >> CRPB_IOID_SHIFT_6) & 0x3f;
bdd4ddde 1593
6c1153e0
JG
1594 else /* IS_GEN_IIE */
1595 tag = (le16_to_cpu(pp->crpb[out_index].id)
1596 >> CRPB_IOID_SHIFT_7) & 0x3f;
bdd4ddde 1597
6c1153e0 1598 qc = ata_qc_from_tag(ap, tag);
bdd4ddde
JG
1599
1600 /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1601 * bits (WARNING: might not necessarily be associated
1602 * with this command), which -should- be clear
1603 * if all is well
1604 */
1605 status = le16_to_cpu(pp->crpb[out_index].flags);
1606 if (unlikely(status & 0xff)) {
1607 mv_err_intr(ap, qc);
1608 return;
1609 }
1610
1611 /* and finally, complete the ATA command */
1612 if (qc) {
1613 qc->err_mask |=
1614 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1615 ata_qc_complete(qc);
1616 }
1617
0ea9e179 1618 /* advance software response queue pointer, to
bdd4ddde
JG
1619 * indicate (after the loop completes) to hardware
1620 * that we have consumed a response queue entry.
1621 */
1622 work_done = true;
1623 pp->resp_idx++;
1624 }
1625
1626 if (work_done)
1627 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1628 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1629 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
20f733e7
BR
1630}
1631
05b308e1
BR
1632/**
1633 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 1634 * @host: host specific structure
05b308e1
BR
1635 * @relevant: port error bits relevant to this host controller
1636 * @hc: which host controller we're to look at
1637 *
1638 * Read then write clear the HC interrupt status then walk each
1639 * port connected to the HC and see if it needs servicing. Port
1640 * success ints are reported in the HC interrupt status reg, the
1641 * port error ints are reported in the higher level main
1642 * interrupt status register and thus are passed in via the
1643 * 'relevant' argument.
1644 *
1645 * LOCKING:
1646 * Inherited from caller.
1647 */
cca3974e 1648static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
20f733e7 1649{
0d5ff566 1650 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
20f733e7 1651 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
20f733e7 1652 u32 hc_irq_cause;
c5d3e45a 1653 int port, port0;
20f733e7 1654
35177265 1655 if (hc == 0)
20f733e7 1656 port0 = 0;
35177265 1657 else
20f733e7 1658 port0 = MV_PORTS_PER_HC;
20f733e7
BR
1659
1660 /* we'll need the HC success int register in most cases */
1661 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
bdd4ddde
JG
1662 if (!hc_irq_cause)
1663 return;
1664
1665 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
1666
1667 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
2dcb407e 1668 hc, relevant, hc_irq_cause);
20f733e7
BR
1669
1670 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
cca3974e 1671 struct ata_port *ap = host->ports[port];
63af2a5c 1672 struct mv_port_priv *pp = ap->private_data;
bdd4ddde 1673 int have_err_bits, hard_port, shift;
55d8ca4f 1674
bdd4ddde 1675 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
a2c91a88
JG
1676 continue;
1677
31961943 1678 shift = port << 1; /* (port * 2) */
20f733e7
BR
1679 if (port >= MV_PORTS_PER_HC) {
1680 shift++; /* skip bit 8 in the HC Main IRQ reg */
1681 }
bdd4ddde
JG
1682 have_err_bits = ((PORT0_ERR << shift) & relevant);
1683
1684 if (unlikely(have_err_bits)) {
1685 struct ata_queued_cmd *qc;
8b260248 1686
9af5c9c9 1687 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1688 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1689 continue;
1690
1691 mv_err_intr(ap, qc);
1692 continue;
1693 }
1694
1695 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1696
1697 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1698 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1699 mv_intr_edma(ap);
1700 } else {
1701 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1702 mv_intr_pio(ap);
20f733e7
BR
1703 }
1704 }
1705 VPRINTK("EXIT\n");
1706}
1707
bdd4ddde
JG
1708static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1709{
02a121da 1710 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
1711 struct ata_port *ap;
1712 struct ata_queued_cmd *qc;
1713 struct ata_eh_info *ehi;
1714 unsigned int i, err_mask, printed = 0;
1715 u32 err_cause;
1716
02a121da 1717 err_cause = readl(mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1718
1719 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1720 err_cause);
1721
1722 DPRINTK("All regs @ PCI error\n");
1723 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1724
02a121da 1725 writelfl(0, mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1726
1727 for (i = 0; i < host->n_ports; i++) {
1728 ap = host->ports[i];
936fd732 1729 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1730 ehi = &ap->link.eh_info;
bdd4ddde
JG
1731 ata_ehi_clear_desc(ehi);
1732 if (!printed++)
1733 ata_ehi_push_desc(ehi,
1734 "PCI err cause 0x%08x", err_cause);
1735 err_mask = AC_ERR_HOST_BUS;
1736 ehi->action = ATA_EH_HARDRESET;
9af5c9c9 1737 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1738 if (qc)
1739 qc->err_mask |= err_mask;
1740 else
1741 ehi->err_mask |= err_mask;
1742
1743 ata_port_freeze(ap);
1744 }
1745 }
1746}
1747
05b308e1 1748/**
c5d3e45a 1749 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1750 * @irq: unused
1751 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1752 *
1753 * Read the read only register to determine if any host
1754 * controllers have pending interrupts. If so, call lower level
1755 * routine to handle. Also check for PCI errors which are only
1756 * reported here.
1757 *
8b260248 1758 * LOCKING:
cca3974e 1759 * This routine holds the host lock while processing pending
05b308e1
BR
1760 * interrupts.
1761 */
7d12e780 1762static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1763{
cca3974e 1764 struct ata_host *host = dev_instance;
20f733e7 1765 unsigned int hc, handled = 0, n_hcs;
0d5ff566 1766 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
646a4da5 1767 u32 irq_stat, irq_mask;
20f733e7 1768
646a4da5 1769 spin_lock(&host->lock);
20f733e7 1770 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
646a4da5 1771 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
20f733e7
BR
1772
1773 /* check the cases where we either have nothing pending or have read
1774 * a bogus register value which can indicate HW removal or PCI fault
1775 */
646a4da5
ML
1776 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1777 goto out_unlock;
20f733e7 1778
cca3974e 1779 n_hcs = mv_get_hc_count(host->ports[0]->flags);
20f733e7 1780
bdd4ddde
JG
1781 if (unlikely(irq_stat & PCI_ERR)) {
1782 mv_pci_error(host, mmio);
1783 handled = 1;
1784 goto out_unlock; /* skip all other HC irq handling */
1785 }
1786
20f733e7
BR
1787 for (hc = 0; hc < n_hcs; hc++) {
1788 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1789 if (relevant) {
cca3974e 1790 mv_host_intr(host, relevant, hc);
bdd4ddde 1791 handled = 1;
20f733e7
BR
1792 }
1793 }
615ab953 1794
bdd4ddde 1795out_unlock:
cca3974e 1796 spin_unlock(&host->lock);
20f733e7
BR
1797
1798 return IRQ_RETVAL(handled);
1799}
1800
c9d39130
JG
1801static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1802{
1803 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1804 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1805
1806 return hc_mmio + ofs;
1807}
1808
1809static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1810{
1811 unsigned int ofs;
1812
1813 switch (sc_reg_in) {
1814 case SCR_STATUS:
1815 case SCR_ERROR:
1816 case SCR_CONTROL:
1817 ofs = sc_reg_in * sizeof(u32);
1818 break;
1819 default:
1820 ofs = 0xffffffffU;
1821 break;
1822 }
1823 return ofs;
1824}
1825
da3dbb17 1826static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1827{
0d5ff566
TH
1828 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1829 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1830 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1831
da3dbb17
TH
1832 if (ofs != 0xffffffffU) {
1833 *val = readl(addr + ofs);
1834 return 0;
1835 } else
1836 return -EINVAL;
c9d39130
JG
1837}
1838
da3dbb17 1839static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1840{
0d5ff566
TH
1841 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1842 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1843 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1844
da3dbb17 1845 if (ofs != 0xffffffffU) {
0d5ff566 1846 writelfl(val, addr + ofs);
da3dbb17
TH
1847 return 0;
1848 } else
1849 return -EINVAL;
c9d39130
JG
1850}
1851
522479fb
JG
1852static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1853{
522479fb
JG
1854 int early_5080;
1855
44c10138 1856 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1857
1858 if (!early_5080) {
1859 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1860 tmp |= (1 << 0);
1861 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1862 }
1863
1864 mv_reset_pci_bus(pdev, mmio);
1865}
1866
1867static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1868{
1869 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1870}
1871
47c2b677 1872static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1873 void __iomem *mmio)
1874{
c9d39130
JG
1875 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1876 u32 tmp;
1877
1878 tmp = readl(phy_mmio + MV5_PHY_MODE);
1879
1880 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1881 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1882}
1883
47c2b677 1884static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1885{
522479fb
JG
1886 u32 tmp;
1887
1888 writel(0, mmio + MV_GPIO_PORT_CTL);
1889
1890 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1891
1892 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1893 tmp |= ~(1 << 0);
1894 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1895}
1896
2a47ce06
JG
1897static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1898 unsigned int port)
bca1c4eb 1899{
c9d39130
JG
1900 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1901 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1902 u32 tmp;
1903 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1904
1905 if (fix_apm_sq) {
1906 tmp = readl(phy_mmio + MV5_LT_MODE);
1907 tmp |= (1 << 19);
1908 writel(tmp, phy_mmio + MV5_LT_MODE);
1909
1910 tmp = readl(phy_mmio + MV5_PHY_CTL);
1911 tmp &= ~0x3;
1912 tmp |= 0x1;
1913 writel(tmp, phy_mmio + MV5_PHY_CTL);
1914 }
1915
1916 tmp = readl(phy_mmio + MV5_PHY_MODE);
1917 tmp &= ~mask;
1918 tmp |= hpriv->signal[port].pre;
1919 tmp |= hpriv->signal[port].amps;
1920 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1921}
1922
c9d39130
JG
1923
1924#undef ZERO
1925#define ZERO(reg) writel(0, port_mmio + (reg))
1926static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1927 unsigned int port)
1928{
1929 void __iomem *port_mmio = mv_port_base(mmio, port);
1930
1931 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1932
1933 mv_channel_reset(hpriv, mmio, port);
1934
1935 ZERO(0x028); /* command */
1936 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1937 ZERO(0x004); /* timer */
1938 ZERO(0x008); /* irq err cause */
1939 ZERO(0x00c); /* irq err mask */
1940 ZERO(0x010); /* rq bah */
1941 ZERO(0x014); /* rq inp */
1942 ZERO(0x018); /* rq outp */
1943 ZERO(0x01c); /* respq bah */
1944 ZERO(0x024); /* respq outp */
1945 ZERO(0x020); /* respq inp */
1946 ZERO(0x02c); /* test control */
1947 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1948}
1949#undef ZERO
1950
1951#define ZERO(reg) writel(0, hc_mmio + (reg))
1952static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1953 unsigned int hc)
47c2b677 1954{
c9d39130
JG
1955 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1956 u32 tmp;
1957
1958 ZERO(0x00c);
1959 ZERO(0x010);
1960 ZERO(0x014);
1961 ZERO(0x018);
1962
1963 tmp = readl(hc_mmio + 0x20);
1964 tmp &= 0x1c1c1c1c;
1965 tmp |= 0x03030303;
1966 writel(tmp, hc_mmio + 0x20);
1967}
1968#undef ZERO
1969
1970static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1971 unsigned int n_hc)
1972{
1973 unsigned int hc, port;
1974
1975 for (hc = 0; hc < n_hc; hc++) {
1976 for (port = 0; port < MV_PORTS_PER_HC; port++)
1977 mv5_reset_hc_port(hpriv, mmio,
1978 (hc * MV_PORTS_PER_HC) + port);
1979
1980 mv5_reset_one_hc(hpriv, mmio, hc);
1981 }
1982
1983 return 0;
47c2b677
JG
1984}
1985
101ffae2
JG
1986#undef ZERO
1987#define ZERO(reg) writel(0, mmio + (reg))
1988static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1989{
02a121da
ML
1990 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1991 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
1992 u32 tmp;
1993
1994 tmp = readl(mmio + MV_PCI_MODE);
1995 tmp &= 0xff00ffff;
1996 writel(tmp, mmio + MV_PCI_MODE);
1997
1998 ZERO(MV_PCI_DISC_TIMER);
1999 ZERO(MV_PCI_MSI_TRIGGER);
2000 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2001 ZERO(HC_MAIN_IRQ_MASK_OFS);
2002 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
2003 ZERO(hpriv->irq_cause_ofs);
2004 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
2005 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2006 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2007 ZERO(MV_PCI_ERR_ATTRIBUTE);
2008 ZERO(MV_PCI_ERR_COMMAND);
2009}
2010#undef ZERO
2011
2012static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2013{
2014 u32 tmp;
2015
2016 mv5_reset_flash(hpriv, mmio);
2017
2018 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2019 tmp &= 0x3;
2020 tmp |= (1 << 5) | (1 << 6);
2021 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2022}
2023
2024/**
2025 * mv6_reset_hc - Perform the 6xxx global soft reset
2026 * @mmio: base address of the HBA
2027 *
2028 * This routine only applies to 6xxx parts.
2029 *
2030 * LOCKING:
2031 * Inherited from caller.
2032 */
c9d39130
JG
2033static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2034 unsigned int n_hc)
101ffae2
JG
2035{
2036 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2037 int i, rc = 0;
2038 u32 t;
2039
2040 /* Following procedure defined in PCI "main command and status
2041 * register" table.
2042 */
2043 t = readl(reg);
2044 writel(t | STOP_PCI_MASTER, reg);
2045
2046 for (i = 0; i < 1000; i++) {
2047 udelay(1);
2048 t = readl(reg);
2dcb407e 2049 if (PCI_MASTER_EMPTY & t)
101ffae2 2050 break;
101ffae2
JG
2051 }
2052 if (!(PCI_MASTER_EMPTY & t)) {
2053 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2054 rc = 1;
2055 goto done;
2056 }
2057
2058 /* set reset */
2059 i = 5;
2060 do {
2061 writel(t | GLOB_SFT_RST, reg);
2062 t = readl(reg);
2063 udelay(1);
2064 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2065
2066 if (!(GLOB_SFT_RST & t)) {
2067 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2068 rc = 1;
2069 goto done;
2070 }
2071
2072 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2073 i = 5;
2074 do {
2075 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2076 t = readl(reg);
2077 udelay(1);
2078 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2079
2080 if (GLOB_SFT_RST & t) {
2081 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2082 rc = 1;
2083 }
2084done:
2085 return rc;
2086}
2087
47c2b677 2088static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2089 void __iomem *mmio)
2090{
2091 void __iomem *port_mmio;
2092 u32 tmp;
2093
ba3fe8fb
JG
2094 tmp = readl(mmio + MV_RESET_CFG);
2095 if ((tmp & (1 << 0)) == 0) {
47c2b677 2096 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2097 hpriv->signal[idx].pre = 0x1 << 5;
2098 return;
2099 }
2100
2101 port_mmio = mv_port_base(mmio, idx);
2102 tmp = readl(port_mmio + PHY_MODE2);
2103
2104 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2105 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2106}
2107
47c2b677 2108static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2109{
47c2b677 2110 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2111}
2112
c9d39130 2113static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2114 unsigned int port)
bca1c4eb 2115{
c9d39130
JG
2116 void __iomem *port_mmio = mv_port_base(mmio, port);
2117
bca1c4eb 2118 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2119 int fix_phy_mode2 =
2120 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2121 int fix_phy_mode4 =
47c2b677
JG
2122 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2123 u32 m2, tmp;
2124
2125 if (fix_phy_mode2) {
2126 m2 = readl(port_mmio + PHY_MODE2);
2127 m2 &= ~(1 << 16);
2128 m2 |= (1 << 31);
2129 writel(m2, port_mmio + PHY_MODE2);
2130
2131 udelay(200);
2132
2133 m2 = readl(port_mmio + PHY_MODE2);
2134 m2 &= ~((1 << 16) | (1 << 31));
2135 writel(m2, port_mmio + PHY_MODE2);
2136
2137 udelay(200);
2138 }
2139
2140 /* who knows what this magic does */
2141 tmp = readl(port_mmio + PHY_MODE3);
2142 tmp &= ~0x7F800000;
2143 tmp |= 0x2A800000;
2144 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2145
2146 if (fix_phy_mode4) {
47c2b677 2147 u32 m4;
bca1c4eb
JG
2148
2149 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2150
2151 if (hp_flags & MV_HP_ERRATA_60X1B2)
2152 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2153
2154 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2155
2156 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2157
2158 if (hp_flags & MV_HP_ERRATA_60X1B2)
2159 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2160 }
2161
2162 /* Revert values of pre-emphasis and signal amps to the saved ones */
2163 m2 = readl(port_mmio + PHY_MODE2);
2164
2165 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2166 m2 |= hpriv->signal[port].amps;
2167 m2 |= hpriv->signal[port].pre;
47c2b677 2168 m2 &= ~(1 << 16);
bca1c4eb 2169
e4e7b892
JG
2170 /* according to mvSata 3.6.1, some IIE values are fixed */
2171 if (IS_GEN_IIE(hpriv)) {
2172 m2 &= ~0xC30FF01F;
2173 m2 |= 0x0000900F;
2174 }
2175
bca1c4eb
JG
2176 writel(m2, port_mmio + PHY_MODE2);
2177}
2178
c9d39130
JG
2179static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2180 unsigned int port_no)
2181{
2182 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2183
2184 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2185
ee9ccdf7 2186 if (IS_GEN_II(hpriv)) {
c9d39130 2187 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2188 ifctl |= (1 << 7); /* enable gen2i speed */
2189 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2190 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2191 }
2192
2193 udelay(25); /* allow reset propagation */
2194
2195 /* Spec never mentions clearing the bit. Marvell's driver does
2196 * clear the bit, however.
2197 */
2198 writelfl(0, port_mmio + EDMA_CMD_OFS);
2199
2200 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2201
ee9ccdf7 2202 if (IS_GEN_I(hpriv))
c9d39130
JG
2203 mdelay(1);
2204}
2205
05b308e1 2206/**
bdd4ddde 2207 * mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2208 * @ap: ATA channel to manipulate
2209 *
2210 * Part of this is taken from __sata_phy_reset and modified to
2211 * not sleep since this routine gets called from interrupt level.
2212 *
2213 * LOCKING:
2214 * Inherited from caller. This is coded to safe to call at
2215 * interrupt level, i.e. it does not sleep.
31961943 2216 */
bdd4ddde
JG
2217static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2218 unsigned long deadline)
20f733e7 2219{
095fec88 2220 struct mv_port_priv *pp = ap->private_data;
cca3974e 2221 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2222 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2223 int retry = 5;
2224 u32 sstatus;
20f733e7
BR
2225
2226 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2227
da3dbb17
TH
2228#ifdef DEBUG
2229 {
2230 u32 sstatus, serror, scontrol;
2231
2232 mv_scr_read(ap, SCR_STATUS, &sstatus);
2233 mv_scr_read(ap, SCR_ERROR, &serror);
2234 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2235 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2d79ab8f 2236 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
da3dbb17
TH
2237 }
2238#endif
20f733e7 2239
22374677
JG
2240 /* Issue COMRESET via SControl */
2241comreset_retry:
936fd732 2242 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
bdd4ddde 2243 msleep(1);
22374677 2244
936fd732 2245 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
bdd4ddde 2246 msleep(20);
22374677 2247
31961943 2248 do {
936fd732 2249 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
62f1d0e6 2250 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2251 break;
22374677 2252
bdd4ddde 2253 msleep(1);
c5d3e45a 2254 } while (time_before(jiffies, deadline));
20f733e7 2255
22374677 2256 /* work around errata */
ee9ccdf7 2257 if (IS_GEN_II(hpriv) &&
22374677
JG
2258 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2259 (retry-- > 0))
2260 goto comreset_retry;
095fec88 2261
da3dbb17
TH
2262#ifdef DEBUG
2263 {
2264 u32 sstatus, serror, scontrol;
2265
2266 mv_scr_read(ap, SCR_STATUS, &sstatus);
2267 mv_scr_read(ap, SCR_ERROR, &serror);
2268 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2269 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2270 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2271 }
2272#endif
31961943 2273
936fd732 2274 if (ata_link_offline(&ap->link)) {
bdd4ddde 2275 *class = ATA_DEV_NONE;
20f733e7
BR
2276 return;
2277 }
2278
22374677
JG
2279 /* even after SStatus reflects that device is ready,
2280 * it seems to take a while for link to be fully
2281 * established (and thus Status no longer 0x80/0x7F),
2282 * so we poll a bit for that, here.
2283 */
2284 retry = 20;
2285 while (1) {
2286 u8 drv_stat = ata_check_status(ap);
2287 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2288 break;
bdd4ddde 2289 msleep(500);
22374677
JG
2290 if (retry-- <= 0)
2291 break;
bdd4ddde
JG
2292 if (time_after(jiffies, deadline))
2293 break;
22374677
JG
2294 }
2295
bdd4ddde
JG
2296 /* FIXME: if we passed the deadline, the following
2297 * code probably produces an invalid result
2298 */
20f733e7 2299
bdd4ddde 2300 /* finally, read device signature from TF registers */
3f19859e 2301 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
095fec88
JG
2302
2303 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2304
bdd4ddde 2305 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2306
bca1c4eb 2307 VPRINTK("EXIT\n");
20f733e7
BR
2308}
2309
cc0680a5 2310static int mv_prereset(struct ata_link *link, unsigned long deadline)
22374677 2311{
cc0680a5 2312 struct ata_port *ap = link->ap;
bdd4ddde 2313 struct mv_port_priv *pp = ap->private_data;
cc0680a5 2314 struct ata_eh_context *ehc = &link->eh_context;
bdd4ddde 2315 int rc;
0ea9e179 2316
bdd4ddde
JG
2317 rc = mv_stop_dma(ap);
2318 if (rc)
2319 ehc->i.action |= ATA_EH_HARDRESET;
2320
2321 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2322 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2323 ehc->i.action |= ATA_EH_HARDRESET;
2324 }
2325
2326 /* if we're about to do hardreset, nothing more to do */
2327 if (ehc->i.action & ATA_EH_HARDRESET)
2328 return 0;
2329
cc0680a5 2330 if (ata_link_online(link))
bdd4ddde
JG
2331 rc = ata_wait_ready(ap, deadline);
2332 else
2333 rc = -ENODEV;
2334
2335 return rc;
22374677
JG
2336}
2337
cc0680a5 2338static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2339 unsigned long deadline)
31961943 2340{
cc0680a5 2341 struct ata_port *ap = link->ap;
bdd4ddde 2342 struct mv_host_priv *hpriv = ap->host->private_data;
0d5ff566 2343 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
31961943 2344
bdd4ddde 2345 mv_stop_dma(ap);
31961943 2346
bdd4ddde 2347 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2348
bdd4ddde
JG
2349 mv_phy_reset(ap, class, deadline);
2350
2351 return 0;
2352}
2353
cc0680a5 2354static void mv_postreset(struct ata_link *link, unsigned int *classes)
bdd4ddde 2355{
cc0680a5 2356 struct ata_port *ap = link->ap;
bdd4ddde
JG
2357 u32 serr;
2358
2359 /* print link status */
cc0680a5 2360 sata_print_link_status(link);
31961943 2361
bdd4ddde 2362 /* clear SError */
cc0680a5
TH
2363 sata_scr_read(link, SCR_ERROR, &serr);
2364 sata_scr_write_flush(link, SCR_ERROR, serr);
bdd4ddde
JG
2365
2366 /* bail out if no device is present */
2367 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2368 DPRINTK("EXIT, no device\n");
2369 return;
9b358e30 2370 }
bdd4ddde
JG
2371
2372 /* set up device control */
2373 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2374}
2375
2376static void mv_error_handler(struct ata_port *ap)
2377{
2378 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2379 mv_hardreset, mv_postreset);
2380}
2381
2382static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2383{
2384 mv_stop_dma(qc->ap);
2385}
2386
2387static void mv_eh_freeze(struct ata_port *ap)
2388{
2389 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2390 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2391 u32 tmp, mask;
2392 unsigned int shift;
2393
2394 /* FIXME: handle coalescing completion events properly */
2395
2396 shift = ap->port_no * 2;
2397 if (hc > 0)
2398 shift++;
2399
2400 mask = 0x3 << shift;
2401
2402 /* disable assertion of portN err, done events */
2403 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2404 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2405}
2406
2407static void mv_eh_thaw(struct ata_port *ap)
2408{
2409 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2410 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2411 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2412 void __iomem *port_mmio = mv_ap_base(ap);
2413 u32 tmp, mask, hc_irq_cause;
2414 unsigned int shift, hc_port_no = ap->port_no;
2415
2416 /* FIXME: handle coalescing completion events properly */
2417
2418 shift = ap->port_no * 2;
2419 if (hc > 0) {
2420 shift++;
2421 hc_port_no -= 4;
2422 }
2423
2424 mask = 0x3 << shift;
2425
2426 /* clear EDMA errors on this port */
2427 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2428
2429 /* clear pending irq events */
2430 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2431 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2432 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2433 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2434
2435 /* enable assertion of portN err, done events */
2436 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2437 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
31961943
BR
2438}
2439
05b308e1
BR
2440/**
2441 * mv_port_init - Perform some early initialization on a single port.
2442 * @port: libata data structure storing shadow register addresses
2443 * @port_mmio: base address of the port
2444 *
2445 * Initialize shadow register mmio addresses, clear outstanding
2446 * interrupts on the port, and unmask interrupts for the future
2447 * start of the port.
2448 *
2449 * LOCKING:
2450 * Inherited from caller.
2451 */
31961943 2452static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2453{
0d5ff566 2454 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2455 unsigned serr_ofs;
2456
8b260248 2457 /* PIO related setup
31961943
BR
2458 */
2459 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2460 port->error_addr =
31961943
BR
2461 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2462 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2463 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2464 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2465 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2466 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2467 port->status_addr =
31961943
BR
2468 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2469 /* special case: control/altstatus doesn't have ATA_REG_ address */
2470 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2471
2472 /* unused: */
8d9db2d2 2473 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2474
31961943
BR
2475 /* Clear any currently outstanding port interrupt conditions */
2476 serr_ofs = mv_scr_offset(SCR_ERROR);
2477 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2478 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2479
646a4da5
ML
2480 /* unmask all non-transient EDMA error interrupts */
2481 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2482
8b260248 2483 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2484 readl(port_mmio + EDMA_CFG_OFS),
2485 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2486 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2487}
2488
4447d351 2489static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2490{
4447d351
TH
2491 struct pci_dev *pdev = to_pci_dev(host->dev);
2492 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2493 u32 hp_flags = hpriv->hp_flags;
2494
5796d1c4 2495 switch (board_idx) {
47c2b677
JG
2496 case chip_5080:
2497 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2498 hp_flags |= MV_HP_GEN_I;
47c2b677 2499
44c10138 2500 switch (pdev->revision) {
47c2b677
JG
2501 case 0x1:
2502 hp_flags |= MV_HP_ERRATA_50XXB0;
2503 break;
2504 case 0x3:
2505 hp_flags |= MV_HP_ERRATA_50XXB2;
2506 break;
2507 default:
2508 dev_printk(KERN_WARNING, &pdev->dev,
2509 "Applying 50XXB2 workarounds to unknown rev\n");
2510 hp_flags |= MV_HP_ERRATA_50XXB2;
2511 break;
2512 }
2513 break;
2514
bca1c4eb
JG
2515 case chip_504x:
2516 case chip_508x:
47c2b677 2517 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2518 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2519
44c10138 2520 switch (pdev->revision) {
47c2b677
JG
2521 case 0x0:
2522 hp_flags |= MV_HP_ERRATA_50XXB0;
2523 break;
2524 case 0x3:
2525 hp_flags |= MV_HP_ERRATA_50XXB2;
2526 break;
2527 default:
2528 dev_printk(KERN_WARNING, &pdev->dev,
2529 "Applying B2 workarounds to unknown rev\n");
2530 hp_flags |= MV_HP_ERRATA_50XXB2;
2531 break;
bca1c4eb
JG
2532 }
2533 break;
2534
2535 case chip_604x:
2536 case chip_608x:
47c2b677 2537 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2538 hp_flags |= MV_HP_GEN_II;
47c2b677 2539
44c10138 2540 switch (pdev->revision) {
47c2b677
JG
2541 case 0x7:
2542 hp_flags |= MV_HP_ERRATA_60X1B2;
2543 break;
2544 case 0x9:
2545 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2546 break;
2547 default:
2548 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2549 "Applying B2 workarounds to unknown rev\n");
2550 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2551 break;
2552 }
2553 break;
2554
e4e7b892 2555 case chip_7042:
02a121da 2556 hp_flags |= MV_HP_PCIE;
306b30f7
ML
2557 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2558 (pdev->device == 0x2300 || pdev->device == 0x2310))
2559 {
4e520033
ML
2560 /*
2561 * Highpoint RocketRAID PCIe 23xx series cards:
2562 *
2563 * Unconfigured drives are treated as "Legacy"
2564 * by the BIOS, and it overwrites sector 8 with
2565 * a "Lgcy" metadata block prior to Linux boot.
2566 *
2567 * Configured drives (RAID or JBOD) leave sector 8
2568 * alone, but instead overwrite a high numbered
2569 * sector for the RAID metadata. This sector can
2570 * be determined exactly, by truncating the physical
2571 * drive capacity to a nice even GB value.
2572 *
2573 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2574 *
2575 * Warn the user, lest they think we're just buggy.
2576 */
2577 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2578 " BIOS CORRUPTS DATA on all attached drives,"
2579 " regardless of if/how they are configured."
2580 " BEWARE!\n");
2581 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2582 " use sectors 8-9 on \"Legacy\" drives,"
2583 " and avoid the final two gigabytes on"
2584 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2585 }
e4e7b892
JG
2586 case chip_6042:
2587 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2588 hp_flags |= MV_HP_GEN_IIE;
2589
44c10138 2590 switch (pdev->revision) {
e4e7b892
JG
2591 case 0x0:
2592 hp_flags |= MV_HP_ERRATA_XX42A0;
2593 break;
2594 case 0x1:
2595 hp_flags |= MV_HP_ERRATA_60X1C0;
2596 break;
2597 default:
2598 dev_printk(KERN_WARNING, &pdev->dev,
2599 "Applying 60X1C0 workarounds to unknown rev\n");
2600 hp_flags |= MV_HP_ERRATA_60X1C0;
2601 break;
2602 }
2603 break;
2604
bca1c4eb 2605 default:
5796d1c4
JG
2606 dev_printk(KERN_ERR, &pdev->dev,
2607 "BUG: invalid board index %u\n", board_idx);
bca1c4eb
JG
2608 return 1;
2609 }
2610
2611 hpriv->hp_flags = hp_flags;
02a121da
ML
2612 if (hp_flags & MV_HP_PCIE) {
2613 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2614 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2615 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2616 } else {
2617 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2618 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2619 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2620 }
bca1c4eb
JG
2621
2622 return 0;
2623}
2624
05b308e1 2625/**
47c2b677 2626 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2627 * @host: ATA host to initialize
2628 * @board_idx: controller index
05b308e1
BR
2629 *
2630 * If possible, do an early global reset of the host. Then do
2631 * our port init and clear/unmask all/relevant host interrupts.
2632 *
2633 * LOCKING:
2634 * Inherited from caller.
2635 */
4447d351 2636static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2637{
2638 int rc = 0, n_hc, port, hc;
4447d351
TH
2639 struct pci_dev *pdev = to_pci_dev(host->dev);
2640 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2641 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb 2642
47c2b677
JG
2643 /* global interrupt mask */
2644 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2645
4447d351 2646 rc = mv_chip_id(host, board_idx);
bca1c4eb
JG
2647 if (rc)
2648 goto done;
2649
4447d351 2650 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2651
4447d351 2652 for (port = 0; port < host->n_ports; port++)
47c2b677 2653 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2654
c9d39130 2655 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2656 if (rc)
20f733e7 2657 goto done;
20f733e7 2658
522479fb
JG
2659 hpriv->ops->reset_flash(hpriv, mmio);
2660 hpriv->ops->reset_bus(pdev, mmio);
47c2b677 2661 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2662
4447d351 2663 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2664 if (IS_GEN_II(hpriv)) {
c9d39130
JG
2665 void __iomem *port_mmio = mv_port_base(mmio, port);
2666
2a47ce06 2667 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2668 ifctl |= (1 << 7); /* enable gen2i speed */
2669 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2670 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2671 }
2672
c9d39130 2673 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2674 }
2675
4447d351 2676 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2677 struct ata_port *ap = host->ports[port];
2a47ce06 2678 void __iomem *port_mmio = mv_port_base(mmio, port);
cbcdd875
TH
2679 unsigned int offset = port_mmio - mmio;
2680
2681 mv_port_init(&ap->ioaddr, port_mmio);
2682
2683 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2684 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
20f733e7
BR
2685 }
2686
2687 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2688 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2689
2690 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2691 "(before clear)=0x%08x\n", hc,
2692 readl(hc_mmio + HC_CFG_OFS),
2693 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2694
2695 /* Clear any currently outstanding hc interrupt conditions */
2696 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2697 }
2698
31961943 2699 /* Clear any currently outstanding host interrupt conditions */
02a121da 2700 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943
BR
2701
2702 /* and unmask interrupt generation for host regs */
02a121da 2703 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
fb621e2f 2704
ee9ccdf7 2705 if (IS_GEN_I(hpriv))
fb621e2f
JG
2706 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2707 else
2708 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
20f733e7
BR
2709
2710 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
8b260248 2711 "PCI int cause/mask=0x%08x/0x%08x\n",
20f733e7
BR
2712 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2713 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
02a121da
ML
2714 readl(mmio + hpriv->irq_cause_ofs),
2715 readl(mmio + hpriv->irq_mask_ofs));
bca1c4eb 2716
31961943 2717done:
20f733e7
BR
2718 return rc;
2719}
2720
05b308e1
BR
2721/**
2722 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2723 * @host: ATA host to print info about
05b308e1
BR
2724 *
2725 * FIXME: complete this.
2726 *
2727 * LOCKING:
2728 * Inherited from caller.
2729 */
4447d351 2730static void mv_print_info(struct ata_host *host)
31961943 2731{
4447d351
TH
2732 struct pci_dev *pdev = to_pci_dev(host->dev);
2733 struct mv_host_priv *hpriv = host->private_data;
44c10138 2734 u8 scc;
c1e4fe71 2735 const char *scc_s, *gen;
31961943
BR
2736
2737 /* Use this to determine the HW stepping of the chip so we know
2738 * what errata to workaround
2739 */
31961943
BR
2740 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2741 if (scc == 0)
2742 scc_s = "SCSI";
2743 else if (scc == 0x01)
2744 scc_s = "RAID";
2745 else
c1e4fe71
JG
2746 scc_s = "?";
2747
2748 if (IS_GEN_I(hpriv))
2749 gen = "I";
2750 else if (IS_GEN_II(hpriv))
2751 gen = "II";
2752 else if (IS_GEN_IIE(hpriv))
2753 gen = "IIE";
2754 else
2755 gen = "?";
31961943 2756
a9524a76 2757 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
2758 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2759 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
2760 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2761}
2762
05b308e1
BR
2763/**
2764 * mv_init_one - handle a positive probe of a Marvell host
2765 * @pdev: PCI device found
2766 * @ent: PCI device ID entry for the matched host
2767 *
2768 * LOCKING:
2769 * Inherited from caller.
2770 */
20f733e7
BR
2771static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2772{
2dcb407e 2773 static int printed_version;
20f733e7 2774 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
2775 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2776 struct ata_host *host;
2777 struct mv_host_priv *hpriv;
2778 int n_ports, rc;
20f733e7 2779
a9524a76
JG
2780 if (!printed_version++)
2781 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 2782
4447d351
TH
2783 /* allocate host */
2784 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2785
2786 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2787 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2788 if (!host || !hpriv)
2789 return -ENOMEM;
2790 host->private_data = hpriv;
2791
2792 /* acquire resources */
24dc5f33
TH
2793 rc = pcim_enable_device(pdev);
2794 if (rc)
20f733e7 2795 return rc;
20f733e7 2796
0d5ff566
TH
2797 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2798 if (rc == -EBUSY)
24dc5f33 2799 pcim_pin_device(pdev);
0d5ff566 2800 if (rc)
24dc5f33 2801 return rc;
4447d351 2802 host->iomap = pcim_iomap_table(pdev);
20f733e7 2803
d88184fb
JG
2804 rc = pci_go_64(pdev);
2805 if (rc)
2806 return rc;
2807
20f733e7 2808 /* initialize adapter */
4447d351 2809 rc = mv_init_host(host, board_idx);
24dc5f33
TH
2810 if (rc)
2811 return rc;
20f733e7 2812
31961943 2813 /* Enable interrupts */
6a59dcf8 2814 if (msi && pci_enable_msi(pdev))
31961943 2815 pci_intx(pdev, 1);
20f733e7 2816
31961943 2817 mv_dump_pci_cfg(pdev, 0x68);
4447d351 2818 mv_print_info(host);
20f733e7 2819
4447d351 2820 pci_set_master(pdev);
ea8b4db9 2821 pci_try_set_mwi(pdev);
4447d351 2822 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 2823 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7
BR
2824}
2825
2826static int __init mv_init(void)
2827{
b7887196 2828 return pci_register_driver(&mv_pci_driver);
20f733e7
BR
2829}
2830
2831static void __exit mv_exit(void)
2832{
2833 pci_unregister_driver(&mv_pci_driver);
2834}
2835
2836MODULE_AUTHOR("Brett Russ");
2837MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2838MODULE_LICENSE("GPL");
2839MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2840MODULE_VERSION(DRV_VERSION);
2841
ddef9bb3
JG
2842module_param(msi, int, 0444);
2843MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2844
20f733e7
BR
2845module_init(mv_init);
2846module_exit(mv_exit);