/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple of workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC	= (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.error_handler		= mv_error_handler,
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= ata_std_qc_defer,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

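/*
 * Worked example of the window arithmetic above, using the constants
 * from the enum: port 5 maps to HC 1 and hard port 1, so mv_port_base()
 * resolves to base + 0x20000 + (1 * 0x10000) + 0x2000 + (1 * 0x2000).
 */
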
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

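/*
 * Note on mv_set_edma_ptrs(): the CRQB ring is 1KB aligned and the CRPB
 * ring is 256B aligned, so the low bits of the IN/OUT pointer registers
 * are free to carry the software queue index (shifted by
 * EDMA_REQ_Q_PTR_SHIFT / EDMA_RSP_Q_PTR_SHIFT) alongside the BASE_LO bits.
 */
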
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

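/*
 * mv_start_dma() is called from mv_qc_issue(); when the new command's
 * protocol (NCQ vs. plain DMA) differs from what EDMA was last set up
 * for, it first stops EDMA so the engine can be reprogrammed via
 * mv_edma_cfg() before being re-enabled.
 */
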
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

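/*
 * Note: the polling loop in __mv_stop_dma() above allows up to
 * 1000 * 100us (~100ms) for EDMA_EN to clear before giving up with -EIO.
 */
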
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}

static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

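/*
 * The splitting loop in mv_fill_sg() clips each ePRD entry so it never
 * crosses a 64KB boundary from its starting address; this is why the
 * scsi_host_templates above advertise only MV_MAX_SG_CT / 2 entries,
 * leaving room for the worst-case split.
 */
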
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

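/*
 * Each packed CRQB command word carries one shadow-register write: the
 * data byte in bits 7:0, the register address above it (shifted by
 * CRQB_CMD_ADDR_SHIFT), the CRQB_CMD_CS select bits, and CRQB_CMD_LAST
 * set on the final word of the sequence.
 */
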
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected queued command, if any (may be NULL)
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

05b308e1
BR
1591/**
1592 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 1593 * @host: host specific structure
05b308e1
BR
1594 * @relevant: port error bits relevant to this host controller
1595 * @hc: which host controller we're to look at
1596 *
1597 * Read then write clear the HC interrupt status then walk each
1598 * port connected to the HC and see if it needs servicing. Port
1599 * success ints are reported in the HC interrupt status reg, the
1600 * port error ints are reported in the higher level main
1601 * interrupt status register and thus are passed in via the
1602 * 'relevant' argument.
1603 *
1604 * LOCKING:
1605 * Inherited from caller.
1606 */
cca3974e 1607static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
20f733e7 1608{
f351b2d6
SB
1609 struct mv_host_priv *hpriv = host->private_data;
1610 void __iomem *mmio = hpriv->base;
20f733e7 1611 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
20f733e7 1612 u32 hc_irq_cause;
f351b2d6 1613 int port, port0, last_port;
20f733e7 1614
35177265 1615 if (hc == 0)
20f733e7 1616 port0 = 0;
35177265 1617 else
20f733e7 1618 port0 = MV_PORTS_PER_HC;
20f733e7 1619
f351b2d6
SB
1620 if (HAS_PCI(host))
1621 last_port = port0 + MV_PORTS_PER_HC;
1622 else
1623 last_port = port0 + hpriv->n_ports;
20f733e7
BR
1624 /* we'll need the HC success int register in most cases */
1625 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
bdd4ddde
JG
1626 if (!hc_irq_cause)
1627 return;
1628
1629 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
1630
1631 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
2dcb407e 1632 hc, relevant, hc_irq_cause);
20f733e7 1633
8f71efe2 1634 for (port = port0; port < last_port; port++) {
cca3974e 1635 struct ata_port *ap = host->ports[port];
8f71efe2 1636 struct mv_port_priv *pp;
bdd4ddde 1637 int have_err_bits, hard_port, shift;
55d8ca4f 1638
bdd4ddde 1639 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
a2c91a88
JG
1640 continue;
1641
8f71efe2
YL
1642 pp = ap->private_data;
1643
31961943 1644 shift = port << 1; /* (port * 2) */
20f733e7
BR
1645 if (port >= MV_PORTS_PER_HC) {
1646 shift++; /* skip bit 8 in the HC Main IRQ reg */
1647 }
bdd4ddde
JG
1648 have_err_bits = ((PORT0_ERR << shift) & relevant);
1649
1650 if (unlikely(have_err_bits)) {
1651 struct ata_queued_cmd *qc;
8b260248 1652
9af5c9c9 1653 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1654 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1655 continue;
1656
1657 mv_err_intr(ap, qc);
1658 continue;
1659 }
1660
1661 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1662
1663 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1664 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1665 mv_intr_edma(ap);
1666 } else {
1667 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1668 mv_intr_pio(ap);
20f733e7
BR
1669 }
1670 }
1671 VPRINTK("EXIT\n");
1672}
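
The shift computation above allots two bits per port in the main IRQ cause register and skips one reserved bit between the two host controllers. A standalone sketch of the resulting mapping, assuming four ports per HC as in this driver; names are illustrative only.

#include <stdio.h>

#define PORTS_PER_HC	4		/* stands in for MV_PORTS_PER_HC */

int main(void)
{
	unsigned int port;

	for (port = 0; port < 2 * PORTS_PER_HC; port++) {
		unsigned int shift = port << 1;	/* two bits per port */

		if (port >= PORTS_PER_HC)
			shift++;		/* skip bit 8 in the main IRQ reg */
		printf("port %u -> bits %u..%u\n", port, shift, shift + 1);
	}
	return 0;
}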
1673
bdd4ddde
JG
1674static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1675{
02a121da 1676 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
1677 struct ata_port *ap;
1678 struct ata_queued_cmd *qc;
1679 struct ata_eh_info *ehi;
1680 unsigned int i, err_mask, printed = 0;
1681 u32 err_cause;
1682
02a121da 1683 err_cause = readl(mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1684
1685 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1686 err_cause);
1687
1688 DPRINTK("All regs @ PCI error\n");
1689 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1690
02a121da 1691 writelfl(0, mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1692
1693 for (i = 0; i < host->n_ports; i++) {
1694 ap = host->ports[i];
936fd732 1695 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1696 ehi = &ap->link.eh_info;
bdd4ddde
JG
1697 ata_ehi_clear_desc(ehi);
1698 if (!printed++)
1699 ata_ehi_push_desc(ehi,
1700 "PCI err cause 0x%08x", err_cause);
1701 err_mask = AC_ERR_HOST_BUS;
cf480626 1702 ehi->action = ATA_EH_RESET;
9af5c9c9 1703 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1704 if (qc)
1705 qc->err_mask |= err_mask;
1706 else
1707 ehi->err_mask |= err_mask;
1708
1709 ata_port_freeze(ap);
1710 }
1711 }
1712}
1713
05b308e1 1714/**
c5d3e45a 1715 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1716 * @irq: unused
1717 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1718 *
1719	 * Read the read-only register to determine whether any host
1720	 * controllers have pending interrupts. If so, call the lower-level
1721	 * routine to handle them. Also check for PCI errors, which are
1722	 * only reported here.
1723 *
8b260248 1724 * LOCKING:
cca3974e 1725 * This routine holds the host lock while processing pending
05b308e1
BR
1726 * interrupts.
1727 */
7d12e780 1728static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1729{
cca3974e 1730 struct ata_host *host = dev_instance;
f351b2d6 1731 struct mv_host_priv *hpriv = host->private_data;
20f733e7 1732 unsigned int hc, handled = 0, n_hcs;
f351b2d6 1733 void __iomem *mmio = hpriv->base;
646a4da5 1734 u32 irq_stat, irq_mask;
20f733e7 1735
646a4da5 1736 spin_lock(&host->lock);
f351b2d6
SB
1737
1738 irq_stat = readl(hpriv->main_cause_reg_addr);
1739 irq_mask = readl(hpriv->main_mask_reg_addr);
20f733e7
BR
1740
1741 /* check the cases where we either have nothing pending or have read
1742 * a bogus register value which can indicate HW removal or PCI fault
1743 */
646a4da5
ML
1744 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1745 goto out_unlock;
20f733e7 1746
cca3974e 1747 n_hcs = mv_get_hc_count(host->ports[0]->flags);
20f733e7 1748
7bb3c529 1749 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
bdd4ddde
JG
1750 mv_pci_error(host, mmio);
1751 handled = 1;
1752 goto out_unlock; /* skip all other HC irq handling */
1753 }
1754
20f733e7
BR
1755 for (hc = 0; hc < n_hcs; hc++) {
1756 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1757 if (relevant) {
cca3974e 1758 mv_host_intr(host, relevant, hc);
bdd4ddde 1759 handled = 1;
20f733e7
BR
1760 }
1761 }
615ab953 1762
bdd4ddde 1763out_unlock:
cca3974e 1764 spin_unlock(&host->lock);
20f733e7
BR
1765
1766 return IRQ_RETVAL(handled);
1767}
1768
c9d39130
JG
1769static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1770{
1771 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1772 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1773
1774 return hc_mmio + ofs;
1775}
1776
1777static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1778{
1779 unsigned int ofs;
1780
1781 switch (sc_reg_in) {
1782 case SCR_STATUS:
1783 case SCR_ERROR:
1784 case SCR_CONTROL:
1785 ofs = sc_reg_in * sizeof(u32);
1786 break;
1787 default:
1788 ofs = 0xffffffffU;
1789 break;
1790 }
1791 return ofs;
1792}
1793
da3dbb17 1794static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1795{
f351b2d6
SB
1796 struct mv_host_priv *hpriv = ap->host->private_data;
1797 void __iomem *mmio = hpriv->base;
0d5ff566 1798 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1799 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1800
da3dbb17
TH
1801 if (ofs != 0xffffffffU) {
1802 *val = readl(addr + ofs);
1803 return 0;
1804 } else
1805 return -EINVAL;
c9d39130
JG
1806}
1807
da3dbb17 1808static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1809{
f351b2d6
SB
1810 struct mv_host_priv *hpriv = ap->host->private_data;
1811 void __iomem *mmio = hpriv->base;
0d5ff566 1812 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1813 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1814
da3dbb17 1815 if (ofs != 0xffffffffU) {
0d5ff566 1816 writelfl(val, addr + ofs);
da3dbb17
TH
1817 return 0;
1818 } else
1819 return -EINVAL;
c9d39130
JG
1820}
1821
7bb3c529 1822static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 1823{
7bb3c529 1824 struct pci_dev *pdev = to_pci_dev(host->dev);
522479fb
JG
1825 int early_5080;
1826
44c10138 1827 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1828
1829 if (!early_5080) {
1830 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1831 tmp |= (1 << 0);
1832 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1833 }
1834
7bb3c529 1835 mv_reset_pci_bus(host, mmio);
522479fb
JG
1836}
1837
1838static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1839{
1840 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1841}
1842
47c2b677 1843static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1844 void __iomem *mmio)
1845{
c9d39130
JG
1846 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1847 u32 tmp;
1848
1849 tmp = readl(phy_mmio + MV5_PHY_MODE);
1850
1851 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1852 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1853}
1854
47c2b677 1855static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1856{
522479fb
JG
1857 u32 tmp;
1858
1859 writel(0, mmio + MV_GPIO_PORT_CTL);
1860
1861 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1862
1863 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1864 tmp |= ~(1 << 0);
1865 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1866}
1867
2a47ce06
JG
1868static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1869 unsigned int port)
bca1c4eb 1870{
c9d39130
JG
1871 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1872 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1873 u32 tmp;
1874 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1875
1876 if (fix_apm_sq) {
1877 tmp = readl(phy_mmio + MV5_LT_MODE);
1878 tmp |= (1 << 19);
1879 writel(tmp, phy_mmio + MV5_LT_MODE);
1880
1881 tmp = readl(phy_mmio + MV5_PHY_CTL);
1882 tmp &= ~0x3;
1883 tmp |= 0x1;
1884 writel(tmp, phy_mmio + MV5_PHY_CTL);
1885 }
1886
1887 tmp = readl(phy_mmio + MV5_PHY_MODE);
1888 tmp &= ~mask;
1889 tmp |= hpriv->signal[port].pre;
1890 tmp |= hpriv->signal[port].amps;
1891 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1892}
1893
c9d39130
JG
1894
1895#undef ZERO
1896#define ZERO(reg) writel(0, port_mmio + (reg))
1897static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1898 unsigned int port)
1899{
1900 void __iomem *port_mmio = mv_port_base(mmio, port);
1901
1902 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1903
1904 mv_channel_reset(hpriv, mmio, port);
1905
1906 ZERO(0x028); /* command */
1907 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1908 ZERO(0x004); /* timer */
1909 ZERO(0x008); /* irq err cause */
1910 ZERO(0x00c); /* irq err mask */
1911 ZERO(0x010); /* rq bah */
1912 ZERO(0x014); /* rq inp */
1913 ZERO(0x018); /* rq outp */
1914 ZERO(0x01c); /* respq bah */
1915 ZERO(0x024); /* respq outp */
1916 ZERO(0x020); /* respq inp */
1917 ZERO(0x02c); /* test control */
1918 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1919}
1920#undef ZERO
1921
1922#define ZERO(reg) writel(0, hc_mmio + (reg))
1923static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1924 unsigned int hc)
47c2b677 1925{
c9d39130
JG
1926 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1927 u32 tmp;
1928
1929 ZERO(0x00c);
1930 ZERO(0x010);
1931 ZERO(0x014);
1932 ZERO(0x018);
1933
1934 tmp = readl(hc_mmio + 0x20);
1935 tmp &= 0x1c1c1c1c;
1936 tmp |= 0x03030303;
1937 writel(tmp, hc_mmio + 0x20);
1938}
1939#undef ZERO
1940
1941static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1942 unsigned int n_hc)
1943{
1944 unsigned int hc, port;
1945
1946 for (hc = 0; hc < n_hc; hc++) {
1947 for (port = 0; port < MV_PORTS_PER_HC; port++)
1948 mv5_reset_hc_port(hpriv, mmio,
1949 (hc * MV_PORTS_PER_HC) + port);
1950
1951 mv5_reset_one_hc(hpriv, mmio, hc);
1952 }
1953
1954 return 0;
47c2b677
JG
1955}
1956
101ffae2
JG
1957#undef ZERO
1958#define ZERO(reg) writel(0, mmio + (reg))
7bb3c529 1959static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
101ffae2 1960{
02a121da 1961 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
1962 u32 tmp;
1963
1964 tmp = readl(mmio + MV_PCI_MODE);
1965 tmp &= 0xff00ffff;
1966 writel(tmp, mmio + MV_PCI_MODE);
1967
1968 ZERO(MV_PCI_DISC_TIMER);
1969 ZERO(MV_PCI_MSI_TRIGGER);
1970 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1971 ZERO(HC_MAIN_IRQ_MASK_OFS);
1972 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
1973 ZERO(hpriv->irq_cause_ofs);
1974 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
1975 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1976 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1977 ZERO(MV_PCI_ERR_ATTRIBUTE);
1978 ZERO(MV_PCI_ERR_COMMAND);
1979}
1980#undef ZERO
1981
1982static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1983{
1984 u32 tmp;
1985
1986 mv5_reset_flash(hpriv, mmio);
1987
1988 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1989 tmp &= 0x3;
1990 tmp |= (1 << 5) | (1 << 6);
1991 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1992}
1993
1994/**
1995 * mv6_reset_hc - Perform the 6xxx global soft reset
1996 * @mmio: base address of the HBA
1997 *
1998 * This routine only applies to 6xxx parts.
1999 *
2000 * LOCKING:
2001 * Inherited from caller.
2002 */
c9d39130
JG
2003static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2004 unsigned int n_hc)
101ffae2
JG
2005{
2006 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2007 int i, rc = 0;
2008 u32 t;
2009
2010	 /* Follow the procedure defined in the PCI "main command and status
2011	  * register" table.
2012	  */
2013 t = readl(reg);
2014 writel(t | STOP_PCI_MASTER, reg);
2015
2016 for (i = 0; i < 1000; i++) {
2017 udelay(1);
2018 t = readl(reg);
2dcb407e 2019 if (PCI_MASTER_EMPTY & t)
101ffae2 2020 break;
101ffae2
JG
2021 }
2022 if (!(PCI_MASTER_EMPTY & t)) {
2023 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2024 rc = 1;
2025 goto done;
2026 }
2027
2028 /* set reset */
2029 i = 5;
2030 do {
2031 writel(t | GLOB_SFT_RST, reg);
2032 t = readl(reg);
2033 udelay(1);
2034 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2035
2036 if (!(GLOB_SFT_RST & t)) {
2037 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2038 rc = 1;
2039 goto done;
2040 }
2041
2042 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2043 i = 5;
2044 do {
2045 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2046 t = readl(reg);
2047 udelay(1);
2048 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2049
2050 if (GLOB_SFT_RST & t) {
2051 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2052 rc = 1;
2053 }
2054done:
2055 return rc;
2056}
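
The routine above uses the same bounded-poll idiom twice: write a control bit, then re-read the register a small fixed number of times before giving up. A standalone sketch of that idiom with a stubbed register; reg_read()/reg_write() merely stand in for readl()/writel(), and the bit position is illustrative rather than the chip's real GLOB_SFT_RST layout.

#include <stdio.h>

#define FAKE_SFT_RST	(1u << 0)	/* illustrative bit, not the real layout */

static unsigned int fake_reg;		/* stands in for the mmio register */

static unsigned int reg_read(void)		{ return fake_reg; }
static void reg_write(unsigned int v)		{ fake_reg = v; }

int main(void)
{
	unsigned int t;
	int i = 5;

	/* request the reset, then poll until the bit reads back or we give up */
	do {
		reg_write(reg_read() | FAKE_SFT_RST);
		t = reg_read();
	} while (!(t & FAKE_SFT_RST) && (i-- > 0));

	if (t & FAKE_SFT_RST)
		printf("reset bit observed\n");
	else
		printf("can't set global reset\n");
	return 0;
}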
2057
47c2b677 2058static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2059 void __iomem *mmio)
2060{
2061 void __iomem *port_mmio;
2062 u32 tmp;
2063
ba3fe8fb
JG
2064 tmp = readl(mmio + MV_RESET_CFG);
2065 if ((tmp & (1 << 0)) == 0) {
47c2b677 2066 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2067 hpriv->signal[idx].pre = 0x1 << 5;
2068 return;
2069 }
2070
2071 port_mmio = mv_port_base(mmio, idx);
2072 tmp = readl(port_mmio + PHY_MODE2);
2073
2074 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2075 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2076}
2077
47c2b677 2078static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2079{
47c2b677 2080 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2081}
2082
c9d39130 2083static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2084 unsigned int port)
bca1c4eb 2085{
c9d39130
JG
2086 void __iomem *port_mmio = mv_port_base(mmio, port);
2087
bca1c4eb 2088 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2089 int fix_phy_mode2 =
2090 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2091 int fix_phy_mode4 =
47c2b677
JG
2092 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2093 u32 m2, tmp;
2094
2095 if (fix_phy_mode2) {
2096 m2 = readl(port_mmio + PHY_MODE2);
2097 m2 &= ~(1 << 16);
2098 m2 |= (1 << 31);
2099 writel(m2, port_mmio + PHY_MODE2);
2100
2101 udelay(200);
2102
2103 m2 = readl(port_mmio + PHY_MODE2);
2104 m2 &= ~((1 << 16) | (1 << 31));
2105 writel(m2, port_mmio + PHY_MODE2);
2106
2107 udelay(200);
2108 }
2109
2110 /* who knows what this magic does */
2111 tmp = readl(port_mmio + PHY_MODE3);
2112 tmp &= ~0x7F800000;
2113 tmp |= 0x2A800000;
2114 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2115
2116 if (fix_phy_mode4) {
47c2b677 2117 u32 m4;
bca1c4eb
JG
2118
2119 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2120
2121 if (hp_flags & MV_HP_ERRATA_60X1B2)
2122 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2123
2124 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2125
2126 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2127
2128 if (hp_flags & MV_HP_ERRATA_60X1B2)
2129 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2130 }
2131
2132 /* Revert values of pre-emphasis and signal amps to the saved ones */
2133 m2 = readl(port_mmio + PHY_MODE2);
2134
2135 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2136 m2 |= hpriv->signal[port].amps;
2137 m2 |= hpriv->signal[port].pre;
47c2b677 2138 m2 &= ~(1 << 16);
bca1c4eb 2139
e4e7b892
JG
2140 /* according to mvSata 3.6.1, some IIE values are fixed */
2141 if (IS_GEN_IIE(hpriv)) {
2142 m2 &= ~0xC30FF01F;
2143 m2 |= 0x0000900F;
2144 }
2145
bca1c4eb
JG
2146 writel(m2, port_mmio + PHY_MODE2);
2147}
2148
f351b2d6
SB
2149/* TODO: use the generic LED interface to configure the SATA Presence */
2150/* & Activity LEDs on the board */
2151static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2152 void __iomem *mmio)
2153{
2154 return;
2155}
2156
2157static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2158 void __iomem *mmio)
2159{
2160 void __iomem *port_mmio;
2161 u32 tmp;
2162
2163 port_mmio = mv_port_base(mmio, idx);
2164 tmp = readl(port_mmio + PHY_MODE2);
2165
2166 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2167 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2168}
2169
2170#undef ZERO
2171#define ZERO(reg) writel(0, port_mmio + (reg))
2172static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2173 void __iomem *mmio, unsigned int port)
2174{
2175 void __iomem *port_mmio = mv_port_base(mmio, port);
2176
2177 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2178
2179 mv_channel_reset(hpriv, mmio, port);
2180
2181 ZERO(0x028); /* command */
2182 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2183 ZERO(0x004); /* timer */
2184 ZERO(0x008); /* irq err cause */
2185 ZERO(0x00c); /* irq err mask */
2186 ZERO(0x010); /* rq bah */
2187 ZERO(0x014); /* rq inp */
2188 ZERO(0x018); /* rq outp */
2189 ZERO(0x01c); /* respq bah */
2190 ZERO(0x024); /* respq outp */
2191 ZERO(0x020); /* respq inp */
2192 ZERO(0x02c); /* test control */
2193 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2194}
2195
2196#undef ZERO
2197
2198#define ZERO(reg) writel(0, hc_mmio + (reg))
2199static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2200 void __iomem *mmio)
2201{
2202 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2203
2204 ZERO(0x00c);
2205 ZERO(0x010);
2206 ZERO(0x014);
2207
2208}
2209
2210#undef ZERO
2211
2212static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2213 void __iomem *mmio, unsigned int n_hc)
2214{
2215 unsigned int port;
2216
2217 for (port = 0; port < hpriv->n_ports; port++)
2218 mv_soc_reset_hc_port(hpriv, mmio, port);
2219
2220 mv_soc_reset_one_hc(hpriv, mmio);
2221
2222 return 0;
2223}
2224
2225static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2226 void __iomem *mmio)
2227{
2228 return;
2229}
2230
2231static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2232{
2233 return;
2234}
2235
c9d39130
JG
2236static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2237 unsigned int port_no)
2238{
2239 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2240
2241 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2242
ee9ccdf7 2243 if (IS_GEN_II(hpriv)) {
c9d39130 2244 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2245 ifctl |= (1 << 7); /* enable gen2i speed */
2246 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2247 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2248 }
2249
2250 udelay(25); /* allow reset propagation */
2251
2252 /* Spec never mentions clearing the bit. Marvell's driver does
2253 * clear the bit, however.
2254 */
2255 writelfl(0, port_mmio + EDMA_CMD_OFS);
2256
2257 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2258
ee9ccdf7 2259 if (IS_GEN_I(hpriv))
c9d39130
JG
2260 mdelay(1);
2261}
2262
05b308e1 2263/**
bdd4ddde 2264 * mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2265 * @ap: ATA channel to manipulate
2266 *
2267 * Part of this is taken from __sata_phy_reset and modified to
2268 * not sleep since this routine gets called from interrupt level.
2269 *
2270 * LOCKING:
2271 * Inherited from caller. This is coded to be safe to call at
2272 * interrupt level, i.e. it does not sleep.
31961943 2273 */
bdd4ddde
JG
2274static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2275 unsigned long deadline)
20f733e7 2276{
095fec88 2277 struct mv_port_priv *pp = ap->private_data;
cca3974e 2278 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2279 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2280 int retry = 5;
2281 u32 sstatus;
20f733e7
BR
2282
2283 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2284
da3dbb17
TH
2285#ifdef DEBUG
2286 {
2287 u32 sstatus, serror, scontrol;
2288
2289 mv_scr_read(ap, SCR_STATUS, &sstatus);
2290 mv_scr_read(ap, SCR_ERROR, &serror);
2291 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2292 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2d79ab8f 2293 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
da3dbb17
TH
2294 }
2295#endif
20f733e7 2296
22374677
JG
2297 /* Issue COMRESET via SControl */
2298comreset_retry:
936fd732 2299 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
bdd4ddde 2300 msleep(1);
22374677 2301
936fd732 2302 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
bdd4ddde 2303 msleep(20);
22374677 2304
31961943 2305 do {
936fd732 2306 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
62f1d0e6 2307 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2308 break;
22374677 2309
bdd4ddde 2310 msleep(1);
c5d3e45a 2311 } while (time_before(jiffies, deadline));
20f733e7 2312
22374677 2313 /* work around errata */
ee9ccdf7 2314 if (IS_GEN_II(hpriv) &&
22374677
JG
2315 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2316 (retry-- > 0))
2317 goto comreset_retry;
095fec88 2318
da3dbb17
TH
2319#ifdef DEBUG
2320 {
2321 u32 sstatus, serror, scontrol;
2322
2323 mv_scr_read(ap, SCR_STATUS, &sstatus);
2324 mv_scr_read(ap, SCR_ERROR, &serror);
2325 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2326 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2327 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2328 }
2329#endif
31961943 2330
936fd732 2331 if (ata_link_offline(&ap->link)) {
bdd4ddde 2332 *class = ATA_DEV_NONE;
20f733e7
BR
2333 return;
2334 }
2335
22374677
JG
2336	 /* Even after SStatus reflects that the device is ready,
2337	  * it seems to take a while for the link to be fully
2338	  * established (and thus Status no longer 0x80/0x7F),
2339	  * so we poll a bit for that here.
2340	  */
2341 retry = 20;
2342 while (1) {
2343 u8 drv_stat = ata_check_status(ap);
2344 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2345 break;
bdd4ddde 2346 msleep(500);
22374677
JG
2347 if (retry-- <= 0)
2348 break;
bdd4ddde
JG
2349 if (time_after(jiffies, deadline))
2350 break;
22374677
JG
2351 }
2352
bdd4ddde
JG
2353 /* FIXME: if we passed the deadline, the following
2354 * code probably produces an invalid result
2355 */
20f733e7 2356
bdd4ddde 2357 /* finally, read device signature from TF registers */
3f19859e 2358 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
095fec88
JG
2359
2360 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2361
bdd4ddde 2362 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2363
bca1c4eb 2364 VPRINTK("EXIT\n");
20f733e7
BR
2365}
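
The polling loop above keys off the DET field in the low bits of SStatus. As background (not driver code), the standard DET encoding is 0 = no device, 1 = device detected but phy communication not yet established, 3 = device detected with phy communication established. A standalone sketch of the (sstatus & 0x3) test used above:

#include <stdio.h>

/* assumed DET meanings per the SATA SStatus definition (background only) */
static const char *det_name(unsigned int det)
{
	switch (det) {
	case 0:  return "no device";
	case 1:  return "device, no phy comm yet";
	case 3:  return "device, phy comm established";
	default: return "other/offline";
	}
}

int main(void)
{
	unsigned int samples[] = { 0x1, 0x113, 0x123, 0x0 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int det = samples[i] & 0x3;	/* same mask as the loop above */

		printf("SStatus 0x%03x -> DET %u (%s) -> %s\n",
		       samples[i], det, det_name(det),
		       (det == 3 || det == 0) ? "stop polling" : "keep polling");
	}
	return 0;
}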
2366
cc0680a5 2367static int mv_prereset(struct ata_link *link, unsigned long deadline)
22374677 2368{
cc0680a5 2369 struct ata_port *ap = link->ap;
bdd4ddde 2370 struct mv_port_priv *pp = ap->private_data;
0ea9e179 2371
cf480626 2372 mv_stop_dma(ap);
bdd4ddde 2373
cf480626 2374 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
bdd4ddde 2375 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
bdd4ddde 2376
cf480626 2377 return 0;
22374677
JG
2378}
2379
cc0680a5 2380static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2381 unsigned long deadline)
31961943 2382{
cc0680a5 2383 struct ata_port *ap = link->ap;
bdd4ddde 2384 struct mv_host_priv *hpriv = ap->host->private_data;
f351b2d6 2385 void __iomem *mmio = hpriv->base;
31961943 2386
bdd4ddde 2387 mv_stop_dma(ap);
31961943 2388
bdd4ddde 2389 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2390
bdd4ddde
JG
2391 mv_phy_reset(ap, class, deadline);
2392
2393 return 0;
2394}
2395
cc0680a5 2396static void mv_postreset(struct ata_link *link, unsigned int *classes)
bdd4ddde 2397{
cc0680a5 2398 struct ata_port *ap = link->ap;
bdd4ddde
JG
2399 u32 serr;
2400
2401 /* print link status */
cc0680a5 2402 sata_print_link_status(link);
31961943 2403
bdd4ddde 2404 /* clear SError */
cc0680a5
TH
2405 sata_scr_read(link, SCR_ERROR, &serr);
2406 sata_scr_write_flush(link, SCR_ERROR, serr);
bdd4ddde
JG
2407
2408 /* bail out if no device is present */
2409 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2410 DPRINTK("EXIT, no device\n");
2411 return;
9b358e30 2412 }
bdd4ddde
JG
2413
2414 /* set up device control */
2415 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2416}
2417
2418static void mv_error_handler(struct ata_port *ap)
2419{
2420 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2421 mv_hardreset, mv_postreset);
2422}
2423
bdd4ddde
JG
2424static void mv_eh_freeze(struct ata_port *ap)
2425{
f351b2d6 2426 struct mv_host_priv *hpriv = ap->host->private_data;
bdd4ddde
JG
2427 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2428 u32 tmp, mask;
2429 unsigned int shift;
2430
2431 /* FIXME: handle coalescing completion events properly */
2432
2433 shift = ap->port_no * 2;
2434 if (hc > 0)
2435 shift++;
2436
2437 mask = 0x3 << shift;
2438
2439 /* disable assertion of portN err, done events */
f351b2d6
SB
2440 tmp = readl(hpriv->main_mask_reg_addr);
2441 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
bdd4ddde
JG
2442}
2443
2444static void mv_eh_thaw(struct ata_port *ap)
2445{
f351b2d6
SB
2446 struct mv_host_priv *hpriv = ap->host->private_data;
2447 void __iomem *mmio = hpriv->base;
bdd4ddde
JG
2448 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2449 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2450 void __iomem *port_mmio = mv_ap_base(ap);
2451 u32 tmp, mask, hc_irq_cause;
2452 unsigned int shift, hc_port_no = ap->port_no;
2453
2454 /* FIXME: handle coalescing completion events properly */
2455
2456 shift = ap->port_no * 2;
2457 if (hc > 0) {
2458 shift++;
2459 hc_port_no -= 4;
2460 }
2461
2462 mask = 0x3 << shift;
2463
2464 /* clear EDMA errors on this port */
2465 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2466
2467 /* clear pending irq events */
2468 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2469 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2470 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2471 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2472
2473 /* enable assertion of portN err, done events */
f351b2d6
SB
2474 tmp = readl(hpriv->main_mask_reg_addr);
2475 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
31961943
BR
2476}
2477
05b308e1
BR
2478/**
2479 * mv_port_init - Perform some early initialization on a single port.
2480 * @port: libata data structure storing shadow register addresses
2481 * @port_mmio: base address of the port
2482 *
2483 * Initialize shadow register mmio addresses, clear outstanding
2484 * interrupts on the port, and unmask interrupts for the future
2485 * start of the port.
2486 *
2487 * LOCKING:
2488 * Inherited from caller.
2489 */
31961943 2490static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2491{
0d5ff566 2492 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2493 unsigned serr_ofs;
2494
8b260248 2495 /* PIO related setup
31961943
BR
2496 */
2497 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2498 port->error_addr =
31961943
BR
2499 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2500 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2501 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2502 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2503 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2504 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2505 port->status_addr =
31961943
BR
2506 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2507 /* special case: control/altstatus doesn't have ATA_REG_ address */
2508 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2509
2510 /* unused: */
8d9db2d2 2511 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2512
31961943
BR
2513 /* Clear any currently outstanding port interrupt conditions */
2514 serr_ofs = mv_scr_offset(SCR_ERROR);
2515 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2516 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2517
646a4da5
ML
2518 /* unmask all non-transient EDMA error interrupts */
2519 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2520
8b260248 2521 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2522 readl(port_mmio + EDMA_CFG_OFS),
2523 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2524 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2525}
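
The shadow-register setup above gives each taskfile register its own 32-bit slot, so consecutive ATA_REG_* indices land four bytes apart in the SHD_BLK window. A standalone sketch of that layout, assuming the conventional libata register numbering (data = 0 through status = 7); illustrative only.

#include <stdio.h>

/* assumed taskfile register indices, mirroring the ATA_REG_* numbering */
enum {
	REG_DATA = 0, REG_ERR, REG_NSECT, REG_LBAL,
	REG_LBAM, REG_LBAH, REG_DEVICE, REG_STATUS,
};

int main(void)
{
	static const char *names[] = {
		"data", "err/feature", "nsect", "lbal",
		"lbam", "lbah", "device", "status/command",
	};
	unsigned int reg;

	for (reg = REG_DATA; reg <= REG_STATUS; reg++)
		printf("%-14s at shd_base + 0x%02x\n", names[reg], reg * 4U);
	return 0;
}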
2526
4447d351 2527static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2528{
4447d351
TH
2529 struct pci_dev *pdev = to_pci_dev(host->dev);
2530 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2531 u32 hp_flags = hpriv->hp_flags;
2532
5796d1c4 2533 switch (board_idx) {
47c2b677
JG
2534 case chip_5080:
2535 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2536 hp_flags |= MV_HP_GEN_I;
47c2b677 2537
44c10138 2538 switch (pdev->revision) {
47c2b677
JG
2539 case 0x1:
2540 hp_flags |= MV_HP_ERRATA_50XXB0;
2541 break;
2542 case 0x3:
2543 hp_flags |= MV_HP_ERRATA_50XXB2;
2544 break;
2545 default:
2546 dev_printk(KERN_WARNING, &pdev->dev,
2547 "Applying 50XXB2 workarounds to unknown rev\n");
2548 hp_flags |= MV_HP_ERRATA_50XXB2;
2549 break;
2550 }
2551 break;
2552
bca1c4eb
JG
2553 case chip_504x:
2554 case chip_508x:
47c2b677 2555 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2556 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2557
44c10138 2558 switch (pdev->revision) {
47c2b677
JG
2559 case 0x0:
2560 hp_flags |= MV_HP_ERRATA_50XXB0;
2561 break;
2562 case 0x3:
2563 hp_flags |= MV_HP_ERRATA_50XXB2;
2564 break;
2565 default:
2566 dev_printk(KERN_WARNING, &pdev->dev,
2567 "Applying B2 workarounds to unknown rev\n");
2568 hp_flags |= MV_HP_ERRATA_50XXB2;
2569 break;
bca1c4eb
JG
2570 }
2571 break;
2572
2573 case chip_604x:
2574 case chip_608x:
47c2b677 2575 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2576 hp_flags |= MV_HP_GEN_II;
47c2b677 2577
44c10138 2578 switch (pdev->revision) {
47c2b677
JG
2579 case 0x7:
2580 hp_flags |= MV_HP_ERRATA_60X1B2;
2581 break;
2582 case 0x9:
2583 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2584 break;
2585 default:
2586 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2587 "Applying B2 workarounds to unknown rev\n");
2588 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2589 break;
2590 }
2591 break;
2592
e4e7b892 2593 case chip_7042:
02a121da 2594 hp_flags |= MV_HP_PCIE;
306b30f7
ML
2595 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2596 (pdev->device == 0x2300 || pdev->device == 0x2310))
2597 {
4e520033
ML
2598 /*
2599 * Highpoint RocketRAID PCIe 23xx series cards:
2600 *
2601 * Unconfigured drives are treated as "Legacy"
2602 * by the BIOS, and it overwrites sector 8 with
2603 * a "Lgcy" metadata block prior to Linux boot.
2604 *
2605 * Configured drives (RAID or JBOD) leave sector 8
2606 * alone, but instead overwrite a high numbered
2607 * sector for the RAID metadata. This sector can
2608 * be determined exactly, by truncating the physical
2609 * drive capacity to a nice even GB value.
2610 *
2611 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2612 *
2613 * Warn the user, lest they think we're just buggy.
2614 */
2615 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2616 " BIOS CORRUPTS DATA on all attached drives,"
2617 " regardless of if/how they are configured."
2618 " BEWARE!\n");
2619 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2620 " use sectors 8-9 on \"Legacy\" drives,"
2621 " and avoid the final two gigabytes on"
2622 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2623 }
e4e7b892
JG
2624 case chip_6042:
2625 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2626 hp_flags |= MV_HP_GEN_IIE;
2627
44c10138 2628 switch (pdev->revision) {
e4e7b892
JG
2629 case 0x0:
2630 hp_flags |= MV_HP_ERRATA_XX42A0;
2631 break;
2632 case 0x1:
2633 hp_flags |= MV_HP_ERRATA_60X1C0;
2634 break;
2635 default:
2636 dev_printk(KERN_WARNING, &pdev->dev,
2637 "Applying 60X1C0 workarounds to unknown rev\n");
2638 hp_flags |= MV_HP_ERRATA_60X1C0;
2639 break;
2640 }
2641 break;
f351b2d6
SB
2642 case chip_soc:
2643 hpriv->ops = &mv_soc_ops;
2644 hp_flags |= MV_HP_ERRATA_60X1C0;
2645 break;
e4e7b892 2646
bca1c4eb 2647 default:
f351b2d6 2648 dev_printk(KERN_ERR, host->dev,
5796d1c4 2649 "BUG: invalid board index %u\n", board_idx);
bca1c4eb
JG
2650 return 1;
2651 }
2652
2653 hpriv->hp_flags = hp_flags;
02a121da
ML
2654 if (hp_flags & MV_HP_PCIE) {
2655 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2656 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2657 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2658 } else {
2659 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2660 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2661 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2662 }
bca1c4eb
JG
2663
2664 return 0;
2665}
2666
05b308e1 2667/**
47c2b677 2668 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2669 * @host: ATA host to initialize
2670 * @board_idx: controller index
05b308e1
BR
2671 *
2672 * If possible, do an early global reset of the host. Then do
2673 * our port init and clear/unmask all/relevant host interrupts.
2674 *
2675 * LOCKING:
2676 * Inherited from caller.
2677 */
4447d351 2678static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2679{
2680 int rc = 0, n_hc, port, hc;
4447d351 2681 struct mv_host_priv *hpriv = host->private_data;
f351b2d6 2682 void __iomem *mmio = hpriv->base;
47c2b677 2683
4447d351 2684 rc = mv_chip_id(host, board_idx);
bca1c4eb 2685 if (rc)
f351b2d6
SB
2686 goto done;
2687
2688 if (HAS_PCI(host)) {
2689 hpriv->main_cause_reg_addr = hpriv->base +
2690 HC_MAIN_IRQ_CAUSE_OFS;
2691 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2692 } else {
2693 hpriv->main_cause_reg_addr = hpriv->base +
2694 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2695 hpriv->main_mask_reg_addr = hpriv->base +
2696 HC_SOC_MAIN_IRQ_MASK_OFS;
2697 }
2698 /* global interrupt mask */
2699 writel(0, hpriv->main_mask_reg_addr);
bca1c4eb 2700
4447d351 2701 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2702
4447d351 2703 for (port = 0; port < host->n_ports; port++)
47c2b677 2704 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2705
c9d39130 2706 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2707 if (rc)
20f733e7 2708 goto done;
20f733e7 2709
522479fb 2710 hpriv->ops->reset_flash(hpriv, mmio);
7bb3c529 2711 hpriv->ops->reset_bus(host, mmio);
47c2b677 2712 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2713
4447d351 2714 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2715 if (IS_GEN_II(hpriv)) {
c9d39130
JG
2716 void __iomem *port_mmio = mv_port_base(mmio, port);
2717
2a47ce06 2718 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2719 ifctl |= (1 << 7); /* enable gen2i speed */
2720 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2721 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2722 }
2723
c9d39130 2724 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2725 }
2726
4447d351 2727 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2728 struct ata_port *ap = host->ports[port];
2a47ce06 2729 void __iomem *port_mmio = mv_port_base(mmio, port);
cbcdd875
TH
2730
2731 mv_port_init(&ap->ioaddr, port_mmio);
2732
7bb3c529 2733#ifdef CONFIG_PCI
f351b2d6
SB
2734 if (HAS_PCI(host)) {
2735 unsigned int offset = port_mmio - mmio;
2736 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2737 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2738 }
7bb3c529 2739#endif
20f733e7
BR
2740 }
2741
2742 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2743 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2744
2745 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2746 "(before clear)=0x%08x\n", hc,
2747 readl(hc_mmio + HC_CFG_OFS),
2748 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2749
2750 /* Clear any currently outstanding hc interrupt conditions */
2751 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2752 }
2753
f351b2d6
SB
2754 if (HAS_PCI(host)) {
2755 /* Clear any currently outstanding host interrupt conditions */
2756 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943 2757
f351b2d6
SB
2758 /* and unmask interrupt generation for host regs */
2759 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2760 if (IS_GEN_I(hpriv))
2761 writelfl(~HC_MAIN_MASKED_IRQS_5,
2762 hpriv->main_mask_reg_addr);
2763 else
2764 writelfl(~HC_MAIN_MASKED_IRQS,
2765 hpriv->main_mask_reg_addr);
2766
2767 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2768 "PCI int cause/mask=0x%08x/0x%08x\n",
2769 readl(hpriv->main_cause_reg_addr),
2770 readl(hpriv->main_mask_reg_addr),
2771 readl(mmio + hpriv->irq_cause_ofs),
2772 readl(mmio + hpriv->irq_mask_ofs));
2773 } else {
2774 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2775 hpriv->main_mask_reg_addr);
2776 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2777 readl(hpriv->main_cause_reg_addr),
2778 readl(hpriv->main_mask_reg_addr));
2779 }
2780done:
2781 return rc;
2782}
fb621e2f 2783
fbf14e2f
BB
2784static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2785{
2786 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2787 MV_CRQB_Q_SZ, 0);
2788 if (!hpriv->crqb_pool)
2789 return -ENOMEM;
2790
2791 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2792 MV_CRPB_Q_SZ, 0);
2793 if (!hpriv->crpb_pool)
2794 return -ENOMEM;
2795
2796 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2797 MV_SG_TBL_SZ, 0);
2798 if (!hpriv->sg_tbl_pool)
2799 return -ENOMEM;
2800
2801 return 0;
2802}
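
These are managed (devres) pools, so they are freed automatically when the device is detached. A hedged sketch of how a per-port buffer would then typically be carved from one of them; example_port_priv and example_alloc_crqb are illustrative names, not symbols from this driver.

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_port_priv {
	void		*crqb;		/* CPU address of the request queue */
	dma_addr_t	crqb_dma;	/* bus address handed to the EDMA engine */
};

/* allocate one request queue from a pool created as above */
static int example_alloc_crqb(struct dma_pool *crqb_pool,
			      struct example_port_priv *priv)
{
	priv->crqb = dma_pool_alloc(crqb_pool, GFP_KERNEL, &priv->crqb_dma);
	if (!priv->crqb)
		return -ENOMEM;
	return 0;
}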
2803
f351b2d6
SB
2804/**
2805 * mv_platform_probe - handle a positive probe of an soc Marvell
2806 * host
2807 * @pdev: platform device found
2808 *
2809 * LOCKING:
2810 * Inherited from caller.
2811 */
2812static int mv_platform_probe(struct platform_device *pdev)
2813{
2814 static int printed_version;
2815 const struct mv_sata_platform_data *mv_platform_data;
2816 const struct ata_port_info *ppi[] =
2817 { &mv_port_info[chip_soc], NULL };
2818 struct ata_host *host;
2819 struct mv_host_priv *hpriv;
2820 struct resource *res;
2821 int n_ports, rc;
20f733e7 2822
f351b2d6
SB
2823 if (!printed_version++)
2824 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
bca1c4eb 2825
f351b2d6
SB
2826 /*
2827 * Simple resource validation ..
2828 */
2829 if (unlikely(pdev->num_resources != 2)) {
2830 dev_err(&pdev->dev, "invalid number of resources\n");
2831 return -EINVAL;
2832 }
2833
2834 /*
2835 * Get the register base first
2836 */
2837 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2838 if (res == NULL)
2839 return -EINVAL;
2840
2841 /* allocate host */
2842 mv_platform_data = pdev->dev.platform_data;
2843 n_ports = mv_platform_data->n_ports;
2844
2845 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2846 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2847
2848 if (!host || !hpriv)
2849 return -ENOMEM;
2850 host->private_data = hpriv;
2851 hpriv->n_ports = n_ports;
2852
2853 host->iomap = NULL;
f1cb0ea1
SB
2854 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2855 res->end - res->start + 1);
f351b2d6
SB
2856 hpriv->base -= MV_SATAHC0_REG_BASE;
2857
fbf14e2f
BB
2858 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2859 if (rc)
2860 return rc;
2861
f351b2d6
SB
2862 /* initialize adapter */
2863 rc = mv_init_host(host, chip_soc);
2864 if (rc)
2865 return rc;
2866
2867 dev_printk(KERN_INFO, &pdev->dev,
2868 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2869 host->n_ports);
2870
2871 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2872 IRQF_SHARED, &mv6_sht);
2873}
2874
2875/*
2876 *
2877 * mv_platform_remove - unplug a platform interface
2878 * @pdev: platform device
2879 *
2880 * A platform bus SATA device has been unplugged. Perform the needed
2881 * cleanup. Also called on module unload for any active devices.
2882 */
2883static int __devexit mv_platform_remove(struct platform_device *pdev)
2884{
2885 struct device *dev = &pdev->dev;
2886 struct ata_host *host = dev_get_drvdata(dev);
f351b2d6
SB
2887
2888 ata_host_detach(host);
f351b2d6 2889 return 0;
20f733e7
BR
2890}
2891
f351b2d6
SB
2892static struct platform_driver mv_platform_driver = {
2893 .probe = mv_platform_probe,
2894 .remove = __devexit_p(mv_platform_remove),
2895 .driver = {
2896 .name = DRV_NAME,
2897 .owner = THIS_MODULE,
2898 },
2899};
2900
2901
7bb3c529 2902#ifdef CONFIG_PCI
f351b2d6
SB
2903static int mv_pci_init_one(struct pci_dev *pdev,
2904 const struct pci_device_id *ent);
2905
7bb3c529
SB
2906
2907static struct pci_driver mv_pci_driver = {
2908 .name = DRV_NAME,
2909 .id_table = mv_pci_tbl,
f351b2d6 2910 .probe = mv_pci_init_one,
7bb3c529
SB
2911 .remove = ata_pci_remove_one,
2912};
2913
2914/*
2915 * module options
2916 */
2917static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2918
2919
2920/* move to PCI layer or libata core? */
2921static int pci_go_64(struct pci_dev *pdev)
2922{
2923 int rc;
2924
2925 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2926 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2927 if (rc) {
2928 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2929 if (rc) {
2930 dev_printk(KERN_ERR, &pdev->dev,
2931 "64-bit DMA enable failed\n");
2932 return rc;
2933 }
2934 }
2935 } else {
2936 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2937 if (rc) {
2938 dev_printk(KERN_ERR, &pdev->dev,
2939 "32-bit DMA enable failed\n");
2940 return rc;
2941 }
2942 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2943 if (rc) {
2944 dev_printk(KERN_ERR, &pdev->dev,
2945 "32-bit consistent DMA enable failed\n");
2946 return rc;
2947 }
2948 }
2949
2950 return rc;
2951}
2952
05b308e1
BR
2953/**
2954 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2955 * @host: ATA host to print info about
05b308e1
BR
2956 *
2957 * FIXME: complete this.
2958 *
2959 * LOCKING:
2960 * Inherited from caller.
2961 */
4447d351 2962static void mv_print_info(struct ata_host *host)
31961943 2963{
4447d351
TH
2964 struct pci_dev *pdev = to_pci_dev(host->dev);
2965 struct mv_host_priv *hpriv = host->private_data;
44c10138 2966 u8 scc;
c1e4fe71 2967 const char *scc_s, *gen;
31961943
BR
2968
2969 /* Use this to determine the HW stepping of the chip so we know
2970	 * what errata to work around
2971 */
31961943
BR
2972 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2973 if (scc == 0)
2974 scc_s = "SCSI";
2975 else if (scc == 0x01)
2976 scc_s = "RAID";
2977 else
c1e4fe71
JG
2978 scc_s = "?";
2979
2980 if (IS_GEN_I(hpriv))
2981 gen = "I";
2982 else if (IS_GEN_II(hpriv))
2983 gen = "II";
2984 else if (IS_GEN_IIE(hpriv))
2985 gen = "IIE";
2986 else
2987 gen = "?";
31961943 2988
a9524a76 2989 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
2990 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2991 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
2992 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2993}
2994
05b308e1 2995/**
f351b2d6 2996 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
05b308e1
BR
2997 * @pdev: PCI device found
2998 * @ent: PCI device ID entry for the matched host
2999 *
3000 * LOCKING:
3001 * Inherited from caller.
3002 */
f351b2d6
SB
3003static int mv_pci_init_one(struct pci_dev *pdev,
3004 const struct pci_device_id *ent)
20f733e7 3005{
2dcb407e 3006 static int printed_version;
20f733e7 3007 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
3008 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3009 struct ata_host *host;
3010 struct mv_host_priv *hpriv;
3011 int n_ports, rc;
20f733e7 3012
a9524a76
JG
3013 if (!printed_version++)
3014 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 3015
4447d351
TH
3016 /* allocate host */
3017 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3018
3019 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3020 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3021 if (!host || !hpriv)
3022 return -ENOMEM;
3023 host->private_data = hpriv;
f351b2d6 3024 hpriv->n_ports = n_ports;
4447d351
TH
3025
3026 /* acquire resources */
24dc5f33
TH
3027 rc = pcim_enable_device(pdev);
3028 if (rc)
20f733e7 3029 return rc;
20f733e7 3030
0d5ff566
TH
3031 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3032 if (rc == -EBUSY)
24dc5f33 3033 pcim_pin_device(pdev);
0d5ff566 3034 if (rc)
24dc5f33 3035 return rc;
4447d351 3036 host->iomap = pcim_iomap_table(pdev);
f351b2d6 3037 hpriv->base = host->iomap[MV_PRIMARY_BAR];
20f733e7 3038
d88184fb
JG
3039 rc = pci_go_64(pdev);
3040 if (rc)
3041 return rc;
3042
da2fa9ba
ML
3043 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3044 if (rc)
3045 return rc;
3046
20f733e7 3047 /* initialize adapter */
4447d351 3048 rc = mv_init_host(host, board_idx);
24dc5f33
TH
3049 if (rc)
3050 return rc;
20f733e7 3051
31961943 3052 /* Enable interrupts */
6a59dcf8 3053 if (msi && pci_enable_msi(pdev))
31961943 3054 pci_intx(pdev, 1);
20f733e7 3055
31961943 3056 mv_dump_pci_cfg(pdev, 0x68);
4447d351 3057 mv_print_info(host);
20f733e7 3058
4447d351 3059 pci_set_master(pdev);
ea8b4db9 3060 pci_try_set_mwi(pdev);
4447d351 3061 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 3062 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7 3063}
7bb3c529 3064#endif
20f733e7 3065
f351b2d6
SB
3066static int mv_platform_probe(struct platform_device *pdev);
3067static int __devexit mv_platform_remove(struct platform_device *pdev);
3068
20f733e7
BR
3069static int __init mv_init(void)
3070{
7bb3c529
SB
3071 int rc = -ENODEV;
3072#ifdef CONFIG_PCI
3073 rc = pci_register_driver(&mv_pci_driver);
f351b2d6
SB
3074 if (rc < 0)
3075 return rc;
3076#endif
3077 rc = platform_driver_register(&mv_platform_driver);
3078
3079#ifdef CONFIG_PCI
3080 if (rc < 0)
3081 pci_unregister_driver(&mv_pci_driver);
7bb3c529
SB
3082#endif
3083 return rc;
20f733e7
BR
3084}
3085
3086static void __exit mv_exit(void)
3087{
7bb3c529 3088#ifdef CONFIG_PCI
20f733e7 3089 pci_unregister_driver(&mv_pci_driver);
7bb3c529 3090#endif
f351b2d6 3091 platform_driver_unregister(&mv_platform_driver);
20f733e7
BR
3092}
3093
3094MODULE_AUTHOR("Brett Russ");
3095MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3096MODULE_LICENSE("GPL");
3097MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3098MODULE_VERSION(DRV_VERSION);
2e7e1214 3099MODULE_ALIAS("platform:sata_mv");
20f733e7 3100
7bb3c529 3101#ifdef CONFIG_PCI
ddef9bb3
JG
3102module_param(msi, int, 0444);
3103MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
7bb3c529 3104#endif
ddef9bb3 3105
20f733e7
BR
3106module_init(mv_init);
3107module_exit(mv_exit);