/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple of workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Especially with PCI Message Signalled Interrupts (MSI), the overhead
  reduced by interrupt mitigation is often not worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

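	/* Worked size check: 32 CRQBs * 32B + 32 CRPBs * 8B + 176 ePRDs * 16B
	 * = 1024 + 256 + 2816 = 4096 bytes, i.e. exactly one 4KB chunk of
	 * DMA memory per port, as promised by the comment above.
	 */
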
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */


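/* Editor's note on the DMA mask negotiation in pci_go_64() below: it
 * tries a 64-bit streaming mask first; if the matching 64-bit consistent
 * mask is refused, it keeps 64-bit streaming DMA but falls back to a
 * 32-bit consistent mask; if 64-bit streaming is unavailable altogether,
 * it uses 32-bit masks for both.
 */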
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
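/* "writelfl" == write long, with flush: the dummy readl() above forces
 * the MMIO write out of any PCI posting buffers before the caller
 * proceeds, keeping ordering-sensitive register sequences ordered.
 */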

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
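/* Worked example of the layout math: port 5 is HC 1 (5 >> 2) and hard
 * port 1 (5 & 3), so its registers start at
 * base + 0x20000 + 1*0x10000 + 0x2000 + 1*0x2000 = base + 0x34000.
 */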

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
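/* Note the pointer encoding used above: each in/out pointer register
 * carries the queue's base address in its upper bits (the BASE_LO
 * masks) and the 5-bit queue index below that, shifted up by 5 for the
 * 1KB-aligned request queue and by 3 for the 256B-aligned response
 * queue.
 */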

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @hpriv: host private data
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
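/* libata numbers these registers SCR_STATUS=0, SCR_ERROR=1 and
 * SCR_CONTROL=2, so the arithmetic above maps them to 0x300, 0x304 and
 * 0x308; SCR_ACTIVE alone lives apart at 0x350.
 */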

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}
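/* The "(addr >> 16) >> 16" idiom above extracts the high 32 bits
 * without ever shifting by 32: when dma_addr_t is only 32 bits wide, a
 * single ">> 32" would be undefined behavior in C, whereas two 16-bit
 * shifts safely yield zero.
 */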

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
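/* The resulting CRQB command halfword: register value in bits 0-7, ATA
 * register address (0-7) in bits 8-10, the fixed 0x2 code in the CS
 * field (bits 11-12), and bit 15 flagging the last word of the command
 * sequence.
 */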

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected queued command, or NULL
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
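		/* e.g. port 4 uses bits 9/10 and port 5 bits 11/12 of
		 * the main cause register: bit 8 carries
		 * PORTS_0_3_COAL_DONE rather than a port interrupt,
		 * hence the extra increment above.
		 */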
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ehi = &ap->eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts. If so, call lower level
 * routine to handle. Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
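
/*
 * Sketch of how the main cause register is carved up above, assuming
 * HC0_IRQ_PEND covers bits 0-8 and HC_SHIFT == 9 as the shift logic
 * implies: for a two-HC chip,
 *
 *	hc 0: relevant = irq_stat & (HC0_IRQ_PEND << 0)  -> bits 0-8
 *	hc 1: relevant = irq_stat & (HC0_IRQ_PEND << 9)  -> bits 9-17
 *
 * PCI_ERR sits above both windows, which is why it is tested first and
 * short-circuits all per-HC handling.
 */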

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
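
/*
 * Worked example for mv5_phy_base(), under the usual assumption of four
 * hardports per HC with PHY blocks at 0x100-byte strides: port 5 maps to
 * HC1's register block, hardport 1, so ofs = (1 + 1) * 0x100 and the PHY
 * registers live 0x200 bytes into that HC's block.
 */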

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
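
/*
 * With libata's SCR_STATUS/SCR_ERROR/SCR_CONTROL numbered 0/1/2, the
 * switch above yields consecutive u32 slots:
 *
 *	SCR_STATUS  -> ofs 0x0
 *	SCR_ERROR   -> ofs 0x4
 *	SCR_CONTROL -> ofs 0x8
 *
 * and anything else returns the 0xffffffffU sentinel, which the callers
 * below translate into -EINVAL.
 */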

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
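
/*
 * Note on the mask above: bits 12:11 are the pre-emphasis field and bits
 * 7:5 the amplitude field saved earlier by mv5_read_preamp(), so the
 * read-modify-write replaces exactly those fields with the values sampled
 * at init time and leaves the rest of MV5_PHY_MODE untouched.
 */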

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
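
/*
 * Both wait loops above follow the same bounded-poll pattern; a minimal
 * sketch of the idiom, with names invented purely for illustration:
 *
 *	int tries = 5;
 *	do {
 *		writel(value, reg);	// re-issue the request
 *		status = readl(reg);	// re-sample the status
 *		udelay(1);
 *	} while (!(status & WANTED_BIT) && (tries-- > 0));
 *
 * i.e. retry a fixed number of times and then report failure, rather
 * than spinning forever on a hung device.
 */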

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit. Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
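
/*
 * The Gen-II ifctl fixup above keeps only the low 12 bits of the
 * interface control register, sets bit 7 (advertise gen2i speed), and
 * overlays the 0x9b1000 constant that the comment attributes to the chip
 * spec; the same sequence is repeated per-port in mv_init_host() below,
 * so a channel reset never loses the setting.
 */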

/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller. This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap, 0, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
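
/*
 * The SControl writes above drive the DET field (bits 3:0): 0x301
 * asserts COMRESET (DET = 1) and 0x300 releases it (DET = 0), while the
 * upper bits keep the IPM field set so Partial and Slumber power states
 * stay disabled during the reset. The SStatus poll then waits for DET to
 * report either "no device" (0) or "device present, comms established"
 * (3) before the signature is read.
 */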

static int mv_prereset(struct ata_port *ap, unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &ap->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_port_online(ap))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}

static int mv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}

static void mv_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serr;

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	sata_scr_read(ap, SCR_ERROR, &serr);
	sata_scr_write_flush(ap, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}

static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}

static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
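
/*
 * freeze/thaw are mirror images: both compute the same two-bit
 * (err, done) mask for this port in the HC Main IRQ mask register;
 * freeze clears it and thaw sets it, with thaw additionally clearing any
 * port interrupt causes that accumulated while frozen so a stale event
 * cannot fire the moment the mask is re-enabled.
 */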

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
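
/*
 * The shadow register block is laid out as consecutive u32 slots, so
 * with libata's ATA_REG_* indices (DATA = 0, ERR = 1, NSECT = 2, ...)
 * the arithmetic above produces, for example:
 *
 *	data_addr  = shd_base + 0x00
 *	error_addr = shd_base + 0x04
 *	nsect_addr = shd_base + 0x08
 *
 * which is why one base pointer plus sizeof(u32) strides covers the
 * whole taskfile.
 */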

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n",
		       board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host. Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* The PCI sub-class code tells us whether the chip presents
	 * itself as a plain SCSI (SATA) controller or as a RAID
	 * controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
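
/*
 * Note on the interrupt setup above: if the 'msi' module parameter was
 * set but pci_enable_msi() failed, the driver falls back to legacy INTx
 * by explicitly re-enabling it; when MSI does come up, INTx is left
 * alone, as enabling MSI supersedes it.
 */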

static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);