/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember that a couple of workarounds (one related to
  PCI-X) are still needed.

  2) Add NCQ support (easy to intermediate, once new-EH support appears)

  3) Investigate problems with PCI Message Signaled Interrupts (MSI).

  4) Add port multiplier support (intermediate)

  5) Test and verify 3.0 Gbps support

  6) Develop a low-power-consumption strategy, and implement it.

  7) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  8) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signaled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the added
  latency cost.

  9) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  10) Verify that the 7042 is fully supported.  I only have a 6042.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),
	MV_HP_GEN_II		= (1 << 7),
	MV_HP_GEN_IIE		= (1 << 8),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

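/* Per-family defaults, indexed by enum chip_type; the mv_pci_tbl
 * entries below select one of these via their driver_data values.
 */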
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

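/* Low-level hooks that differ between the 50xx (Gen I) and the
 * 60xx/70xx (Gen II/IIE) families; a pointer to the appropriate
 * table is kept in mv_host_priv.ops.
 */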
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */

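/* Set the device's DMA masks: prefer full 64-bit DMA, falling back to
 * 32-bit masks for the streaming and/or consistent mappings when the
 * 64-bit setting fails.
 */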
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

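/**
 * mv_set_edma_ptrs - Program EDMA queue base/pointer registers
 * @port_mmio: port base address
 * @hpriv: host private data
 * @pp: port private data
 *
 * Write the CRQB/CRPB queue DMA addresses and the current software
 * request/response indices into the hardware in/out pointer
 * registers.
 *
 * LOCKING:
 * Inherited from caller.
 */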
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

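/* The dump helpers below are for debugging only; their bodies are
 * compiled only when ATA_DEBUG is defined.
 */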
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

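/* Map an SCR register number to its offset within the port's SATA
 * register block: STATUS, ERROR and CONTROL are contiguous starting
 * at 0x300, while ACTIVE sits apart at 0x350.
 */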
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		return readl(mv_ap_base(ap) + ofs);
	else
		return (u32) ofs;
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		writelfl(val, mv_ap_base(ap) + ofs);
}

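/* Program the EDMA configuration register for the chip generation at
 * hand: Gen I and Gen II clear the queue depth and set their burst
 * parameters, while Gen IIE additionally tunes the host queue cache,
 * early-completion and cut-through bits.
 */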
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}

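/* Pack one shadow-register write into a 16-bit CRQB command word:
 * the register value goes in bits 7:0, the register address above it,
 * plus control/chip-select bits and a LAST flag on the final word.
 */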
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 command words...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected queued command, or NULL
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, ", parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			", dev disconnect" : ", dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, ", EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, ", EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

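/* Complete a PIO-mode command: reading the ATA status register also
 * clears the device interrupt.  Bail out if the drive is still busy,
 * if no command is active, or if the command is being polled.
 */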
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

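/* Drain the EDMA response queue: walk from the software out-pointer
 * toward the hardware in-pointer, completing the qc for each CRPB
 * entry, then write the new out-pointer back to hardware in one go.
 */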
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

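/* Handle a fatal PCI error: log and clear the cause register, dump
 * the registers for debugging, then freeze every online port so that
 * EH performs a hard reset.
 */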
bdd4ddde
JG
1625static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1626{
1627 struct ata_port *ap;
1628 struct ata_queued_cmd *qc;
1629 struct ata_eh_info *ehi;
1630 unsigned int i, err_mask, printed = 0;
1631 u32 err_cause;
1632
1633 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1634
1635 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1636 err_cause);
1637
1638 DPRINTK("All regs @ PCI error\n");
1639 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1640
1641 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1642
1643 for (i = 0; i < host->n_ports; i++) {
1644 ap = host->ports[i];
1645 if (!ata_port_offline(ap)) {
1646 ehi = &ap->eh_info;
1647 ata_ehi_clear_desc(ehi);
1648 if (!printed++)
1649 ata_ehi_push_desc(ehi,
1650 "PCI err cause 0x%08x", err_cause);
1651 err_mask = AC_ERR_HOST_BUS;
1652 ehi->action = ATA_EH_HARDRESET;
1653 qc = ata_qc_from_tag(ap, ap->active_tag);
1654 if (qc)
1655 qc->err_mask |= err_mask;
1656 else
1657 ehi->err_mask |= err_mask;
1658
1659 ata_port_freeze(ap);
1660 }
1661 }
1662}
1663
05b308e1 1664/**
c5d3e45a 1665 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1666 * @irq: unused
1667 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1668 *
1669 * Read the read only register to determine if any host
1670 * controllers have pending interrupts. If so, call lower level
1671 * routine to handle. Also check for PCI errors which are only
1672 * reported here.
1673 *
8b260248 1674 * LOCKING:
cca3974e 1675 * This routine holds the host lock while processing pending
05b308e1
BR
1676 * interrupts.
1677 */
7d12e780 1678static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1679{
cca3974e 1680 struct ata_host *host = dev_instance;
20f733e7 1681 unsigned int hc, handled = 0, n_hcs;
0d5ff566 1682 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
20f733e7
BR
1683 u32 irq_stat;
1684
20f733e7 1685 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
20f733e7
BR
1686
1687 /* check the cases where we either have nothing pending or have read
 1688	 * a bogus register value, which can indicate HW removal or a PCI fault
1689 */
35177265 1690 if (!irq_stat || (0xffffffffU == irq_stat))
20f733e7 1691 return IRQ_NONE;
20f733e7 1692
cca3974e
JG
1693 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1694 spin_lock(&host->lock);
20f733e7 1695
bdd4ddde
JG
1696 if (unlikely(irq_stat & PCI_ERR)) {
1697 mv_pci_error(host, mmio);
1698 handled = 1;
1699 goto out_unlock; /* skip all other HC irq handling */
1700 }
1701
20f733e7
BR
1702 for (hc = 0; hc < n_hcs; hc++) {
1703 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1704 if (relevant) {
cca3974e 1705 mv_host_intr(host, relevant, hc);
bdd4ddde 1706 handled = 1;
20f733e7
BR
1707 }
1708 }
615ab953 1709
bdd4ddde 1710out_unlock:
cca3974e 1711 spin_unlock(&host->lock);
20f733e7
BR
1712
1713 return IRQ_RETVAL(handled);
1714}
1715
c9d39130
JG
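/* On 5xxx parts the per-port PHY registers live inside the HC
 * register bank, one 0x100-byte window per hard port starting at
 * offset 0x100 from the HC base.
 */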
1716static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1717{
1718 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1719 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1720
1721 return hc_mmio + ofs;
1722}
1723
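/* libata numbers SCR_STATUS/SCR_ERROR/SCR_CONTROL 0/1/2, so on 5xxx
 * parts they map to consecutive u32 offsets from the PHY base; any
 * other SCR index yields the 0xffffffffU "unsupported" sentinel that
 * the read/write wrappers below test for.
 */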
1724static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1725{
1726 unsigned int ofs;
1727
1728 switch (sc_reg_in) {
1729 case SCR_STATUS:
1730 case SCR_ERROR:
1731 case SCR_CONTROL:
1732 ofs = sc_reg_in * sizeof(u32);
1733 break;
1734 default:
1735 ofs = 0xffffffffU;
1736 break;
1737 }
1738 return ofs;
1739}
1740
1741static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1742{
0d5ff566
TH
1743 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1744 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1745 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1746
1747 if (ofs != 0xffffffffU)
0d5ff566 1748 return readl(addr + ofs);
c9d39130
JG
1749 else
1750 return (u32) ofs;
1751}
1752
1753static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1754{
0d5ff566
TH
1755 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1756 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1757 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1758
1759 if (ofs != 0xffffffffU)
0d5ff566 1760 writelfl(val, addr + ofs);
c9d39130
JG
1761}
1762
522479fb
JG
1763static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1764{
522479fb
JG
1765 int early_5080;
1766
44c10138 1767 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1768
1769 if (!early_5080) {
1770 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1771 tmp |= (1 << 0);
1772 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1773 }
1774
1775 mv_reset_pci_bus(pdev, mmio);
1776}
1777
1778static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1779{
1780 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1781}
1782
47c2b677 1783static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1784 void __iomem *mmio)
1785{
c9d39130
JG
1786 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1787 u32 tmp;
1788
1789 tmp = readl(phy_mmio + MV5_PHY_MODE);
1790
1791 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1792 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1793}
1794
47c2b677 1795static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1796{
522479fb
JG
1797 u32 tmp;
1798
1799 writel(0, mmio + MV_GPIO_PORT_CTL);
1800
1801 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1802
1803 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1804 tmp |= ~(1 << 0);
1805 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1806}
1807
2a47ce06
JG
1808static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1809 unsigned int port)
bca1c4eb 1810{
c9d39130
JG
1811 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1812 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1813 u32 tmp;
1814 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1815
1816 if (fix_apm_sq) {
1817 tmp = readl(phy_mmio + MV5_LT_MODE);
1818 tmp |= (1 << 19);
1819 writel(tmp, phy_mmio + MV5_LT_MODE);
1820
1821 tmp = readl(phy_mmio + MV5_PHY_CTL);
1822 tmp &= ~0x3;
1823 tmp |= 0x1;
1824 writel(tmp, phy_mmio + MV5_PHY_CTL);
1825 }
1826
1827 tmp = readl(phy_mmio + MV5_PHY_MODE);
1828 tmp &= ~mask;
1829 tmp |= hpriv->signal[port].pre;
1830 tmp |= hpriv->signal[port].amps;
1831 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1832}
1833
c9d39130
JG
1834
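/* Bring one 5xxx port to a clean post-reset state: disable EDMA,
 * pulse the channel reset, then zero the EDMA request/response queue
 * pointers and error/irq registers before restoring the config and
 * IORDY timeout defaults.
 */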
1835#undef ZERO
1836#define ZERO(reg) writel(0, port_mmio + (reg))
1837static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1838 unsigned int port)
1839{
1840 void __iomem *port_mmio = mv_port_base(mmio, port);
1841
1842 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1843
1844 mv_channel_reset(hpriv, mmio, port);
1845
1846 ZERO(0x028); /* command */
1847 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1848 ZERO(0x004); /* timer */
1849 ZERO(0x008); /* irq err cause */
1850 ZERO(0x00c); /* irq err mask */
1851 ZERO(0x010); /* rq bah */
1852 ZERO(0x014); /* rq inp */
1853 ZERO(0x018); /* rq outp */
1854 ZERO(0x01c); /* respq bah */
1855 ZERO(0x024); /* respq outp */
1856 ZERO(0x020); /* respq inp */
1857 ZERO(0x02c); /* test control */
1858 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1859}
1860#undef ZERO
1861
1862#define ZERO(reg) writel(0, hc_mmio + (reg))
1863static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1864 unsigned int hc)
47c2b677 1865{
c9d39130
JG
1866 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1867 u32 tmp;
1868
1869 ZERO(0x00c);
1870 ZERO(0x010);
1871 ZERO(0x014);
1872 ZERO(0x018);
1873
1874 tmp = readl(hc_mmio + 0x20);
1875 tmp &= 0x1c1c1c1c;
1876 tmp |= 0x03030303;
1877 writel(tmp, hc_mmio + 0x20);
1878}
1879#undef ZERO
1880
1881static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1882 unsigned int n_hc)
1883{
1884 unsigned int hc, port;
1885
1886 for (hc = 0; hc < n_hc; hc++) {
1887 for (port = 0; port < MV_PORTS_PER_HC; port++)
1888 mv5_reset_hc_port(hpriv, mmio,
1889 (hc * MV_PORTS_PER_HC) + port);
1890
1891 mv5_reset_one_hc(hpriv, mmio, hc);
1892 }
1893
1894 return 0;
47c2b677
JG
1895}
1896
101ffae2
JG
1897#undef ZERO
1898#define ZERO(reg) writel(0, mmio + (reg))
1899static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1900{
1901 u32 tmp;
1902
1903 tmp = readl(mmio + MV_PCI_MODE);
1904 tmp &= 0xff00ffff;
1905 writel(tmp, mmio + MV_PCI_MODE);
1906
1907 ZERO(MV_PCI_DISC_TIMER);
1908 ZERO(MV_PCI_MSI_TRIGGER);
1909 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1910 ZERO(HC_MAIN_IRQ_MASK_OFS);
1911 ZERO(MV_PCI_SERR_MASK);
1912 ZERO(PCI_IRQ_CAUSE_OFS);
1913 ZERO(PCI_IRQ_MASK_OFS);
1914 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1915 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1916 ZERO(MV_PCI_ERR_ATTRIBUTE);
1917 ZERO(MV_PCI_ERR_COMMAND);
1918}
1919#undef ZERO
1920
1921static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1922{
1923 u32 tmp;
1924
1925 mv5_reset_flash(hpriv, mmio);
1926
1927 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1928 tmp &= 0x3;
1929 tmp |= (1 << 5) | (1 << 6);
1930 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1931}
1932
1933/**
1934 * mv6_reset_hc - Perform the 6xxx global soft reset
1935 * @mmio: base address of the HBA
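 * @hpriv: host private data (unused here)
 * @n_hc: number of host controllers (unused; the reset is global)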
1936 *
1937 * This routine only applies to 6xxx parts.
1938 *
1939 * LOCKING:
1940 * Inherited from caller.
1941 */
c9d39130
JG
1942static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1943 unsigned int n_hc)
101ffae2
JG
1944{
1945 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1946 int i, rc = 0;
1947 u32 t;
1948
1949 /* Following procedure defined in PCI "main command and status
1950 * register" table.
1951 */
1952 t = readl(reg);
1953 writel(t | STOP_PCI_MASTER, reg);
1954
1955 for (i = 0; i < 1000; i++) {
1956 udelay(1);
1957 t = readl(reg);
1958 if (PCI_MASTER_EMPTY & t) {
1959 break;
1960 }
1961 }
1962 if (!(PCI_MASTER_EMPTY & t)) {
1963 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1964 rc = 1;
1965 goto done;
1966 }
1967
1968 /* set reset */
1969 i = 5;
1970 do {
1971 writel(t | GLOB_SFT_RST, reg);
1972 t = readl(reg);
1973 udelay(1);
1974 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
1975
1976 if (!(GLOB_SFT_RST & t)) {
1977 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1978 rc = 1;
1979 goto done;
1980 }
1981
1982 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
1983 i = 5;
1984 do {
1985 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1986 t = readl(reg);
1987 udelay(1);
1988 } while ((GLOB_SFT_RST & t) && (i-- > 0));
1989
1990 if (GLOB_SFT_RST & t) {
1991 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1992 rc = 1;
1993 }
1994done:
1995 return rc;
1996}
1997
47c2b677 1998static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1999 void __iomem *mmio)
2000{
2001 void __iomem *port_mmio;
2002 u32 tmp;
2003
ba3fe8fb
JG
2004 tmp = readl(mmio + MV_RESET_CFG);
2005 if ((tmp & (1 << 0)) == 0) {
47c2b677 2006 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2007 hpriv->signal[idx].pre = 0x1 << 5;
2008 return;
2009 }
2010
2011 port_mmio = mv_port_base(mmio, idx);
2012 tmp = readl(port_mmio + PHY_MODE2);
2013
2014 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2015 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2016}
2017
47c2b677 2018static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2019{
47c2b677 2020 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2021}
2022
c9d39130 2023static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2024 unsigned int port)
bca1c4eb 2025{
c9d39130
JG
2026 void __iomem *port_mmio = mv_port_base(mmio, port);
2027
bca1c4eb 2028 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2029 int fix_phy_mode2 =
2030 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2031 int fix_phy_mode4 =
47c2b677
JG
2032 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2033 u32 m2, tmp;
2034
2035 if (fix_phy_mode2) {
2036 m2 = readl(port_mmio + PHY_MODE2);
2037 m2 &= ~(1 << 16);
2038 m2 |= (1 << 31);
2039 writel(m2, port_mmio + PHY_MODE2);
2040
2041 udelay(200);
2042
2043 m2 = readl(port_mmio + PHY_MODE2);
2044 m2 &= ~((1 << 16) | (1 << 31));
2045 writel(m2, port_mmio + PHY_MODE2);
2046
2047 udelay(200);
2048 }
2049
2050 /* who knows what this magic does */
2051 tmp = readl(port_mmio + PHY_MODE3);
2052 tmp &= ~0x7F800000;
2053 tmp |= 0x2A800000;
2054 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2055
2056 if (fix_phy_mode4) {
47c2b677 2057 u32 m4;
bca1c4eb
JG
2058
2059 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2060
2061 if (hp_flags & MV_HP_ERRATA_60X1B2)
2062 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2063
2064 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2065
2066 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2067
2068 if (hp_flags & MV_HP_ERRATA_60X1B2)
2069 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2070 }
2071
2072 /* Revert values of pre-emphasis and signal amps to the saved ones */
2073 m2 = readl(port_mmio + PHY_MODE2);
2074
2075 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2076 m2 |= hpriv->signal[port].amps;
2077 m2 |= hpriv->signal[port].pre;
47c2b677 2078 m2 &= ~(1 << 16);
bca1c4eb 2079
e4e7b892
JG
2080 /* according to mvSata 3.6.1, some IIE values are fixed */
2081 if (IS_GEN_IIE(hpriv)) {
2082 m2 &= ~0xC30FF01F;
2083 m2 |= 0x0000900F;
2084 }
2085
bca1c4eb
JG
2086 writel(m2, port_mmio + PHY_MODE2);
2087}
2088
c9d39130
JG
2089static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2090 unsigned int port_no)
2091{
2092 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2093
2094 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2095
ee9ccdf7 2096 if (IS_GEN_II(hpriv)) {
c9d39130 2097 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2098 ifctl |= (1 << 7); /* enable gen2i speed */
2099 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2100 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2101 }
2102
2103 udelay(25); /* allow reset propagation */
2104
2105 /* Spec never mentions clearing the bit. Marvell's driver does
2106 * clear the bit, however.
2107 */
2108 writelfl(0, port_mmio + EDMA_CMD_OFS);
2109
2110 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2111
ee9ccdf7 2112 if (IS_GEN_I(hpriv))
c9d39130
JG
2113 mdelay(1);
2114}
2115
05b308e1 2116/**
bdd4ddde 2117 * mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2118 * @ap: ATA channel to manipulate
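 * @class: resulting device class, set from the signature read at the end
 * @deadline: deadline jiffies for the reset to complete by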
2119 *
 2120 * Part of this is taken from __sata_phy_reset.  Note that this
 2121 * routine now uses msleep() and therefore may sleep.
2122 *
2123 * LOCKING:
 2124 * Inherited from caller.  Because this routine sleeps, it must
 2125 * not be called from interrupt context.
31961943 2126 */
bdd4ddde
JG
2127static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2128 unsigned long deadline)
20f733e7 2129{
095fec88 2130 struct mv_port_priv *pp = ap->private_data;
cca3974e 2131 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2132 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2133 int retry = 5;
2134 u32 sstatus;
20f733e7
BR
2135
2136 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2137
095fec88 2138 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
31961943
BR
2139 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2140 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
20f733e7 2141
22374677
JG
2142 /* Issue COMRESET via SControl */
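	/* SControl.DET=1 (the 0x301 write) starts interface
	 * re-initialization; DET=0 (0x300) then releases it.  Both
	 * values keep IPM=3 to disable partial/slumber transitions.
	 */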
2143comreset_retry:
81952c54 2144 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
bdd4ddde 2145 msleep(1);
22374677 2146
81952c54 2147 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
bdd4ddde 2148 msleep(20);
22374677 2149
31961943 2150 do {
81952c54 2151 sata_scr_read(ap, SCR_STATUS, &sstatus);
62f1d0e6 2152 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2153 break;
22374677 2154
bdd4ddde 2155 msleep(1);
c5d3e45a 2156 } while (time_before(jiffies, deadline));
20f733e7 2157
22374677 2158 /* work around errata */
ee9ccdf7 2159 if (IS_GEN_II(hpriv) &&
22374677
JG
2160 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2161 (retry-- > 0))
2162 goto comreset_retry;
095fec88
JG
2163
2164 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
31961943
BR
2165 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2166 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
2167
bdd4ddde
JG
2168 if (ata_port_offline(ap)) {
2169 *class = ATA_DEV_NONE;
20f733e7
BR
2170 return;
2171 }
2172
22374677
JG
 2173	/* even after SStatus reflects that the device is ready,
 2174	 * it seems to take a while for the link to be fully
 2175	 * established (and thus Status no longer 0x80/0x7F),
 2176	 * so we poll a bit for that here.
2177 */
2178 retry = 20;
2179 while (1) {
2180 u8 drv_stat = ata_check_status(ap);
2181 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2182 break;
bdd4ddde 2183 msleep(500);
22374677
JG
2184 if (retry-- <= 0)
2185 break;
bdd4ddde
JG
2186 if (time_after(jiffies, deadline))
2187 break;
22374677
JG
2188 }
2189
bdd4ddde
JG
2190 /* FIXME: if we passed the deadline, the following
2191 * code probably produces an invalid result
2192 */
20f733e7 2193
bdd4ddde
JG
2194 /* finally, read device signature from TF registers */
2195 *class = ata_dev_try_classify(ap, 0, NULL);
095fec88
JG
2196
2197 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2198
bdd4ddde 2199 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2200
bca1c4eb 2201 VPRINTK("EXIT\n");
20f733e7
BR
2202}
2203
bdd4ddde 2204static int mv_prereset(struct ata_port *ap, unsigned long deadline)
22374677 2205{
bdd4ddde
JG
2206 struct mv_port_priv *pp = ap->private_data;
2207 struct ata_eh_context *ehc = &ap->eh_context;
2208 int rc;
2209
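	/* EDMA must be quiesced before any reset; if it will not stop,
	 * or if this port has never been through a reset, escalate to
	 * hardreset
	 */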
2210 rc = mv_stop_dma(ap);
2211 if (rc)
2212 ehc->i.action |= ATA_EH_HARDRESET;
2213
2214 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2215 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2216 ehc->i.action |= ATA_EH_HARDRESET;
2217 }
2218
2219 /* if we're about to do hardreset, nothing more to do */
2220 if (ehc->i.action & ATA_EH_HARDRESET)
2221 return 0;
2222
2223 if (ata_port_online(ap))
2224 rc = ata_wait_ready(ap, deadline);
2225 else
2226 rc = -ENODEV;
2227
2228 return rc;
22374677
JG
2229}
2230
bdd4ddde
JG
2231static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2232 unsigned long deadline)
31961943 2233{
bdd4ddde 2234 struct mv_host_priv *hpriv = ap->host->private_data;
0d5ff566 2235 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
31961943 2236
bdd4ddde 2237 mv_stop_dma(ap);
31961943 2238
bdd4ddde 2239 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2240
bdd4ddde
JG
2241 mv_phy_reset(ap, class, deadline);
2242
2243 return 0;
2244}
2245
2246static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2247{
2248 u32 serr;
2249
2250 /* print link status */
2251 sata_print_link_status(ap);
31961943 2252
bdd4ddde
JG
2253 /* clear SError */
2254 sata_scr_read(ap, SCR_ERROR, &serr);
2255 sata_scr_write_flush(ap, SCR_ERROR, serr);
2256
2257 /* bail out if no device is present */
2258 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2259 DPRINTK("EXIT, no device\n");
2260 return;
9b358e30 2261 }
bdd4ddde
JG
2262
2263 /* set up device control */
2264 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2265}
2266
2267static void mv_error_handler(struct ata_port *ap)
2268{
2269 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2270 mv_hardreset, mv_postreset);
2271}
2272
2273static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2274{
2275 mv_stop_dma(qc->ap);
2276}
2277
2278static void mv_eh_freeze(struct ata_port *ap)
2279{
2280 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2281 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2282 u32 tmp, mask;
2283 unsigned int shift;
2284
2285 /* FIXME: handle coalescing completion events properly */
2286
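	/* same (err, done) bit-pair layout as in mv_host_intr(): two
	 * bits per port, plus one extra shift past the bit 8
	 * coalescing event for ports on the second HC
	 */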
2287 shift = ap->port_no * 2;
2288 if (hc > 0)
2289 shift++;
2290
2291 mask = 0x3 << shift;
2292
2293 /* disable assertion of portN err, done events */
2294 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2295 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2296}
2297
2298static void mv_eh_thaw(struct ata_port *ap)
2299{
2300 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2301 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2302 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2303 void __iomem *port_mmio = mv_ap_base(ap);
2304 u32 tmp, mask, hc_irq_cause;
2305 unsigned int shift, hc_port_no = ap->port_no;
2306
2307 /* FIXME: handle coalescing completion events properly */
2308
2309 shift = ap->port_no * 2;
2310 if (hc > 0) {
2311 shift++;
2312 hc_port_no -= 4;
2313 }
2314
2315 mask = 0x3 << shift;
2316
2317 /* clear EDMA errors on this port */
2318 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2319
2320 /* clear pending irq events */
2321 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2322 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2323 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2324 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2325
2326 /* enable assertion of portN err, done events */
2327 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2328 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
31961943
BR
2329}
2330
05b308e1
BR
2331/**
2332 * mv_port_init - Perform some early initialization on a single port.
2333 * @port: libata data structure storing shadow register addresses
2334 * @port_mmio: base address of the port
2335 *
2336 * Initialize shadow register mmio addresses, clear outstanding
2337 * interrupts on the port, and unmask interrupts for the future
2338 * start of the port.
2339 *
2340 * LOCKING:
2341 * Inherited from caller.
2342 */
31961943 2343static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2344{
0d5ff566 2345 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2346 unsigned serr_ofs;
2347
8b260248 2348 /* PIO related setup
31961943
BR
2349 */
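	/* each shadow taskfile register occupies its own 32-bit slot
	 * off SHD_BLK_OFS, hence the sizeof(u32) scaling below
	 */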
2350 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2351 port->error_addr =
31961943
BR
2352 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2353 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2354 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2355 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2356 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2357 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2358 port->status_addr =
31961943
BR
2359 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2360 /* special case: control/altstatus doesn't have ATA_REG_ address */
2361 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2362
2363 /* unused: */
8d9db2d2 2364 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2365
31961943
BR
2366 /* Clear any currently outstanding port interrupt conditions */
2367 serr_ofs = mv_scr_offset(SCR_ERROR);
2368 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2369 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2370
20f733e7 2371 /* unmask all EDMA error interrupts */
31961943 2372 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2373
8b260248 2374 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2375 readl(port_mmio + EDMA_CFG_OFS),
2376 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2377 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2378}
2379
4447d351 2380static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2381{
4447d351
TH
2382 struct pci_dev *pdev = to_pci_dev(host->dev);
2383 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2384 u32 hp_flags = hpriv->hp_flags;
2385
bca1c4eb 2386	switch (board_idx) {
47c2b677
JG
2387 case chip_5080:
2388 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2389 hp_flags |= MV_HP_GEN_I;
47c2b677 2390
44c10138 2391 switch (pdev->revision) {
47c2b677
JG
2392 case 0x1:
2393 hp_flags |= MV_HP_ERRATA_50XXB0;
2394 break;
2395 case 0x3:
2396 hp_flags |= MV_HP_ERRATA_50XXB2;
2397 break;
2398 default:
2399 dev_printk(KERN_WARNING, &pdev->dev,
2400 "Applying 50XXB2 workarounds to unknown rev\n");
2401 hp_flags |= MV_HP_ERRATA_50XXB2;
2402 break;
2403 }
2404 break;
2405
bca1c4eb
JG
2406 case chip_504x:
2407 case chip_508x:
47c2b677 2408 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2409 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2410
44c10138 2411 switch (pdev->revision) {
47c2b677
JG
2412 case 0x0:
2413 hp_flags |= MV_HP_ERRATA_50XXB0;
2414 break;
2415 case 0x3:
2416 hp_flags |= MV_HP_ERRATA_50XXB2;
2417 break;
2418 default:
2419 dev_printk(KERN_WARNING, &pdev->dev,
2420 "Applying B2 workarounds to unknown rev\n");
2421 hp_flags |= MV_HP_ERRATA_50XXB2;
2422 break;
bca1c4eb
JG
2423 }
2424 break;
2425
2426 case chip_604x:
2427 case chip_608x:
47c2b677 2428 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2429 hp_flags |= MV_HP_GEN_II;
47c2b677 2430
44c10138 2431 switch (pdev->revision) {
47c2b677
JG
2432 case 0x7:
2433 hp_flags |= MV_HP_ERRATA_60X1B2;
2434 break;
2435 case 0x9:
2436 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2437 break;
2438 default:
2439 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2440 "Applying B2 workarounds to unknown rev\n");
2441 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2442 break;
2443 }
2444 break;
2445
e4e7b892
JG
2446 case chip_7042:
2447 case chip_6042:
2448 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2449 hp_flags |= MV_HP_GEN_IIE;
2450
44c10138 2451 switch (pdev->revision) {
e4e7b892
JG
2452 case 0x0:
2453 hp_flags |= MV_HP_ERRATA_XX42A0;
2454 break;
2455 case 0x1:
2456 hp_flags |= MV_HP_ERRATA_60X1C0;
2457 break;
2458 default:
2459 dev_printk(KERN_WARNING, &pdev->dev,
2460 "Applying 60X1C0 workarounds to unknown rev\n");
2461 hp_flags |= MV_HP_ERRATA_60X1C0;
2462 break;
2463 }
2464 break;
2465
bca1c4eb
JG
2466 default:
2467 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2468 return 1;
2469 }
2470
2471 hpriv->hp_flags = hp_flags;
2472
2473 return 0;
2474}
2475
05b308e1 2476/**
47c2b677 2477 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2478 * @host: ATA host to initialize
2479 * @board_idx: controller index
05b308e1
BR
2480 *
2481 * If possible, do an early global reset of the host. Then do
2482 * our port init and clear/unmask all/relevant host interrupts.
2483 *
2484 * LOCKING:
2485 * Inherited from caller.
2486 */
4447d351 2487static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2488{
2489 int rc = 0, n_hc, port, hc;
4447d351
TH
2490 struct pci_dev *pdev = to_pci_dev(host->dev);
2491 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2492 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb 2493
47c2b677
JG
2494 /* global interrupt mask */
2495 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2496
4447d351 2497 rc = mv_chip_id(host, board_idx);
bca1c4eb
JG
2498 if (rc)
2499 goto done;
2500
4447d351 2501 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2502
4447d351 2503 for (port = 0; port < host->n_ports; port++)
47c2b677 2504 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2505
c9d39130 2506 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2507 if (rc)
20f733e7 2508 goto done;
20f733e7 2509
522479fb
JG
2510 hpriv->ops->reset_flash(hpriv, mmio);
2511 hpriv->ops->reset_bus(pdev, mmio);
47c2b677 2512 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2513
4447d351 2514 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2515 if (IS_GEN_II(hpriv)) {
c9d39130
JG
2516 void __iomem *port_mmio = mv_port_base(mmio, port);
2517
2a47ce06 2518 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2519 ifctl |= (1 << 7); /* enable gen2i speed */
2520 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2521 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2522 }
2523
c9d39130 2524 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2525 }
2526
4447d351 2527 for (port = 0; port < host->n_ports; port++) {
2a47ce06 2528 void __iomem *port_mmio = mv_port_base(mmio, port);
4447d351 2529 mv_port_init(&host->ports[port]->ioaddr, port_mmio);
20f733e7
BR
2530 }
2531
2532 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2533 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2534
2535 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2536 "(before clear)=0x%08x\n", hc,
2537 readl(hc_mmio + HC_CFG_OFS),
2538 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2539
2540 /* Clear any currently outstanding hc interrupt conditions */
2541 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2542 }
2543
31961943
BR
2544 /* Clear any currently outstanding host interrupt conditions */
2545 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2546
2547 /* and unmask interrupt generation for host regs */
2548 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
fb621e2f 2549
ee9ccdf7 2550 if (IS_GEN_I(hpriv))
fb621e2f
JG
2551 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2552 else
2553 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
20f733e7
BR
2554
2555 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
8b260248 2556 "PCI int cause/mask=0x%08x/0x%08x\n",
20f733e7
BR
2557 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2558 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2559 readl(mmio + PCI_IRQ_CAUSE_OFS),
2560 readl(mmio + PCI_IRQ_MASK_OFS));
bca1c4eb 2561
31961943 2562done:
20f733e7
BR
2563 return rc;
2564}
2565
05b308e1
BR
2566/**
2567 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2568 * @host: ATA host to print info about
05b308e1
BR
2569 *
2570 * FIXME: complete this.
2571 *
2572 * LOCKING:
2573 * Inherited from caller.
2574 */
4447d351 2575static void mv_print_info(struct ata_host *host)
31961943 2576{
4447d351
TH
2577 struct pci_dev *pdev = to_pci_dev(host->dev);
2578 struct mv_host_priv *hpriv = host->private_data;
44c10138 2579 u8 scc;
c1e4fe71 2580 const char *scc_s, *gen;
31961943
BR
2581
 2582	/* Read the PCI sub-class code so we can report whether the
 2583	 * chip presents itself as a SCSI or a RAID class device
 2584	 */
31961943
BR
2585 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2586 if (scc == 0)
2587 scc_s = "SCSI";
2588 else if (scc == 0x01)
2589 scc_s = "RAID";
2590 else
c1e4fe71
JG
2591 scc_s = "?";
2592
2593 if (IS_GEN_I(hpriv))
2594 gen = "I";
2595 else if (IS_GEN_II(hpriv))
2596 gen = "II";
2597 else if (IS_GEN_IIE(hpriv))
2598 gen = "IIE";
2599 else
2600 gen = "?";
31961943 2601
a9524a76 2602 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
2603 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2604 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
2605 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2606}
2607
05b308e1
BR
2608/**
2609 * mv_init_one - handle a positive probe of a Marvell host
2610 * @pdev: PCI device found
2611 * @ent: PCI device ID entry for the matched host
2612 *
2613 * LOCKING:
2614 * Inherited from caller.
2615 */
20f733e7
BR
2616static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2617{
2618 static int printed_version = 0;
20f733e7 2619 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
2620 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2621 struct ata_host *host;
2622 struct mv_host_priv *hpriv;
2623 int n_ports, rc;
20f733e7 2624
a9524a76
JG
2625 if (!printed_version++)
2626 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 2627
4447d351
TH
2628 /* allocate host */
2629 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2630
2631 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2632 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2633 if (!host || !hpriv)
2634 return -ENOMEM;
2635 host->private_data = hpriv;
2636
2637 /* acquire resources */
24dc5f33
TH
2638 rc = pcim_enable_device(pdev);
2639 if (rc)
20f733e7 2640 return rc;
20f733e7 2641
0d5ff566
TH
2642 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2643 if (rc == -EBUSY)
24dc5f33 2644 pcim_pin_device(pdev);
0d5ff566 2645 if (rc)
24dc5f33 2646 return rc;
4447d351 2647 host->iomap = pcim_iomap_table(pdev);
20f733e7 2648
d88184fb
JG
2649 rc = pci_go_64(pdev);
2650 if (rc)
2651 return rc;
2652
20f733e7 2653 /* initialize adapter */
4447d351 2654 rc = mv_init_host(host, board_idx);
24dc5f33
TH
2655 if (rc)
2656 return rc;
20f733e7 2657
31961943 2658 /* Enable interrupts */
6a59dcf8 2659 if (msi && pci_enable_msi(pdev))
31961943 2660 pci_intx(pdev, 1);
20f733e7 2661
31961943 2662 mv_dump_pci_cfg(pdev, 0x68);
4447d351 2663 mv_print_info(host);
20f733e7 2664
4447d351 2665 pci_set_master(pdev);
ea8b4db9 2666 pci_try_set_mwi(pdev);
4447d351 2667 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 2668 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7
BR
2669}
2670
2671static int __init mv_init(void)
2672{
b7887196 2673 return pci_register_driver(&mv_pci_driver);
20f733e7
BR
2674}
2675
2676static void __exit mv_exit(void)
2677{
2678 pci_unregister_driver(&mv_pci_driver);
2679}
2680
2681MODULE_AUTHOR("Brett Russ");
2682MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2683MODULE_LICENSE("GPL");
2684MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2685MODULE_VERSION(DRV_VERSION);
2686
ddef9bb3
JG
2687module_param(msi, int, 0444);
2688MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
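/* e.g. "modprobe sata_mv msi=1" requests MSI; mv_init_one() falls
 * back to INTx when pci_enable_msi() fails
 */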
2689
20f733e7
BR
2690module_init(mv_init);
2691module_exit(mv_exit);