/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
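	/*
	 * Worked example (illustrative, not in the original driver text):
	 * with MV_MAX_Q_DEPTH == 32, MV_CRQB_Q_SZ is 32 * 32B == 1KB and
	 * MV_CRPB_Q_SZ is 32 * 8B == 256B, matching the 1KB/256B alignment
	 * requirements noted above.  Each command can carry up to
	 * MV_MAX_SG_CT (256) ePRDs of 16B each, i.e. a 4KB table.
	 */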

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II  IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FIS_CFG_OFS		= 0x360,
	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX |
				 /* temporary, until we fix hotplug: */
				 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};
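/*
 * Size check (illustrative): 4 + 4 + 2 + 11*2 == 32 bytes,
 * matching the "32B" CRQB size noted above.
 */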

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
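/*
 * Size check (illustrative): four __le32 fields == 16 bytes per ePRD,
 * so a full table of MV_MAX_SG_CT entries occupies MV_SG_TBL_SZ (4KB).
 */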

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= sata_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
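/*
 * Usage note (illustrative): writelfl() is used wherever a register
 * write must actually reach the chip before execution continues, e.g.
 * when clearing an IRQ cause register; the dummy readl() forces the
 * posted PCI write to complete.
 */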

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with the main_cause and main_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
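/*
 * Worked example (illustrative): for port == 6, mv_hc_from_port(6) == 1,
 * so shift starts at 1 * HC_SHIFT == 9; hardport == (6 & MV_PORT_MASK) == 2,
 * giving shift == 9 + 2*2 == 13.  (DONE_IRQ | ERR_IRQ) << 13 then selects
 * port 6's bits in the main_cause/main_mask registers.
 */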

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
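/*
 * Layout note (illustrative): CRQB entries are 32B, so the request queue
 * in/out pointer registers hold (index << EDMA_REQ_Q_PTR_SHIFT), i.e.
 * index * 32, in their low bits, with the 1KB-aligned queue base in the
 * bits above (EDMA_REQ_Q_BASE_LO_MASK).  CRPB entries are 8B, hence the
 * shift of 3 and the 256B-aligned base for the response queue.
 */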

/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command being started
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hardport = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hardport);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
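/*
 * Timing note (illustrative): the polling loop above gives the engine
 * up to roughly 100ms (10000 iterations * 10us) to shut down before
 * giving up with -EIO.
 */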

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
				adev->max_sectors);
		}
	}
}

static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
{
	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
	/*
	 * Various bit settings required for operation
	 * in FIS-based switching (fbs) mode on GenIIe:
	 */
	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
	old_ltmode = readl(port_mmio + LTMODE_OFS);
	if (enable_fbs) {
		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode |  LTMODE_BIT8;
	} else { /* disable fbs */
		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode & ~LTMODE_BIT8;
	}
	if (new_fcfg != old_fcfg)
		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
05b308e1
BR
1232/**
1233 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1234 * @qc: queued command whose SG list to source from
1235 *
1236 * Populate the SG list and mark the last entry.
1237 *
1238 * LOCKING:
1239 * Inherited from caller.
1240 */
6c08772e 1241static void mv_fill_sg(struct ata_queued_cmd *qc)
31961943
BR
1242{
1243 struct mv_port_priv *pp = qc->ap->private_data;
972c26bd 1244 struct scatterlist *sg;
3be6cbd7 1245 struct mv_sg *mv_sg, *last_sg = NULL;
ff2aeb1e 1246 unsigned int si;
31961943 1247
eb73d558 1248 mv_sg = pp->sg_tbl[qc->tag];
ff2aeb1e 1249 for_each_sg(qc->sg, sg, qc->n_elem, si) {
d88184fb
JG
1250 dma_addr_t addr = sg_dma_address(sg);
1251 u32 sg_len = sg_dma_len(sg);
22374677 1252
4007b493
OJ
1253 while (sg_len) {
1254 u32 offset = addr & 0xffff;
1255 u32 len = sg_len;
22374677 1256
4007b493
OJ
1257 if ((offset + sg_len > 0x10000))
1258 len = 0x10000 - offset;
1259
1260 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1261 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
6c08772e 1262 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
4007b493
OJ
1263
1264 sg_len -= len;
1265 addr += len;
1266
3be6cbd7 1267 last_sg = mv_sg;
4007b493 1268 mv_sg++;
4007b493 1269 }
31961943 1270 }
3be6cbd7
JG
1271
1272 if (likely(last_sg))
1273 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
31961943
BR
1274}
1275
5796d1c4 1276static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
31961943 1277{
559eedad 1278 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
31961943 1279 (last ? CRQB_CMD_LAST : 0);
559eedad 1280 *cmdw = cpu_to_le16(tmp);
31961943
BR
1281}
1282
05b308e1
BR
1283/**
1284 * mv_qc_prep - Host specific command preparation.
1285 * @qc: queued command to prepare
1286 *
1287 * This routine simply redirects to the general purpose routine
1288 * if command is not DMA. Else, it handles prep of the CRQB
1289 * (command request block), does some sanity checking, and calls
1290 * the SG load routine.
1291 *
1292 * LOCKING:
1293 * Inherited from caller.
1294 */
31961943
BR
1295static void mv_qc_prep(struct ata_queued_cmd *qc)
1296{
1297 struct ata_port *ap = qc->ap;
1298 struct mv_port_priv *pp = ap->private_data;
e1469874 1299 __le16 *cw;
31961943
BR
1300 struct ata_taskfile *tf;
1301 u16 flags = 0;
a6432436 1302 unsigned in_index;
31961943 1303
138bfdd0
ML
1304 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1305 (qc->tf.protocol != ATA_PROT_NCQ))
31961943 1306 return;
20f733e7 1307
31961943
BR
1308 /* Fill in command request block
1309 */
e4e7b892 1310 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
31961943 1311 flags |= CRQB_FLAG_READ;
beec7dbc 1312 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
31961943 1313 flags |= qc->tag << CRQB_TAG_SHIFT;
e49856d8 1314 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
31961943 1315
bdd4ddde 1316 /* get current queue index from software */
fcfb1f77 1317 in_index = pp->req_idx;
a6432436
ML
1318
1319 pp->crqb[in_index].sg_addr =
eb73d558 1320 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
a6432436 1321 pp->crqb[in_index].sg_addr_hi =
eb73d558 1322 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
a6432436 1323 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
31961943 1324
a6432436 1325 cw = &pp->crqb[in_index].ata_cmd[0];
31961943
BR
1326 tf = &qc->tf;
1327
1328 /* Sadly, the CRQB cannot accomodate all registers--there are
1329 * only 11 bytes...so we must pick and choose required
1330 * registers based on the command. So, we drop feature and
1331 * hob_feature for [RW] DMA commands, but they are needed for
1332 * NCQ. NCQ will drop hob_nsect.
20f733e7 1333 */
31961943
BR
1334 switch (tf->command) {
1335 case ATA_CMD_READ:
1336 case ATA_CMD_READ_EXT:
1337 case ATA_CMD_WRITE:
1338 case ATA_CMD_WRITE_EXT:
c15d85c8 1339 case ATA_CMD_WRITE_FUA_EXT:
31961943
BR
1340 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1341 break;
31961943
BR
1342 case ATA_CMD_FPDMA_READ:
1343 case ATA_CMD_FPDMA_WRITE:
8b260248 1344 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
31961943
BR
1345 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1346 break;
31961943
BR
1347 default:
1348 /* The only other commands EDMA supports in non-queued and
1349 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1350 * of which are defined/used by Linux. If we get here, this
1351 * driver needs work.
1352 *
1353 * FIXME: modify libata to give qc_prep a return value and
1354 * return error here.
1355 */
1356 BUG_ON(tf->command);
1357 break;
1358 }
1359 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1360 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1361 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1362 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1363 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1364 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1365 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1366 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1367 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1368
e4e7b892
JG
1369 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1370 return;
1371 mv_fill_sg(qc);
1372}
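/*
 * Layout note (illustrative): the packed words above land in
 * mv_crqb.ata_cmd[], which has 11 __le16 slots; the switch statement
 * selects which optional registers are sent, and the ATA command
 * register is always packed last with CRQB_CMD_LAST set.
 */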

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected command (non-NCQ), if any; otherwise NULL
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which also performs a COMRESET.
 *      The SERR case requires a clear of pending errors in the SATA
 *      SERROR register.  Finally, if the port disabled DMA,
 *      update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);

	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1588
1589static void mv_intr_pio(struct ata_port *ap)
1590{
1591 struct ata_queued_cmd *qc;
1592 u8 ata_status;
1593
1594 /* ignore spurious intr if drive still BUSY */
1595 ata_status = readb(ap->ioaddr.status_addr);
1596 if (unlikely(ata_status & ATA_BUSY))
1597 return;
1598
1599 /* get active ATA command */
9af5c9c9 1600 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1601 if (unlikely(!qc)) /* no active tag */
1602 return;
1603 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1604 return;
1605
1606 /* and finally, complete the ATA command */
1607 qc->err_mask |= ac_err_mask(ata_status);
1608 ata_qc_complete(qc);
1609}
1610
fcfb1f77
ML
1611static void mv_process_crpb_response(struct ata_port *ap,
1612 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
1613{
1614 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1615
1616 if (qc) {
1617 u8 ata_status;
1618 u16 edma_status = le16_to_cpu(response->flags);
1619 /*
1620 * edma_status from a response queue entry:
1621 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
1622 * MSB is saved ATA status from command completion.
1623 */
1624 if (!ncq_enabled) {
1625 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
1626 if (err_cause) {
1627 /*
1628 * Error will be seen/handled by mv_err_intr().
1629 * So do nothing at all here.
1630 */
1631 return;
1632 }
1633 }
1634 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
1635 qc->err_mask |= ac_err_mask(ata_status);
1636 ata_qc_complete(qc);
1637 } else {
1638 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
1639 __func__, tag);
1640 }
1641}
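/*
 * Editorial sketch (not part of the driver): how the 16-bit CRPB flags
 * word decodes, per the comment above.  These helpers are hypothetical;
 * only CRPB_FLAG_STATUS_SHIFT and the 0xff cause mask come from the
 * real code.
 */
static inline u8 mv_crpb_ata_status(u16 edma_status)
{
	return edma_status >> CRPB_FLAG_STATUS_SHIFT;	/* saved ATA status */
}

static inline u8 mv_crpb_err_cause(u16 edma_status)
{
	return edma_status & 0xff;	/* EDMA_ERR_IRQ_CAUSE bits, non-NCQ only */
}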
1642
1643static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
bdd4ddde
JG
1644{
1645 void __iomem *port_mmio = mv_ap_base(ap);
1646 struct mv_host_priv *hpriv = ap->host->private_data;
fcfb1f77 1647 u32 in_index;
bdd4ddde 1648 bool work_done = false;
fcfb1f77 1649 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
bdd4ddde 1650
fcfb1f77 1651 /* Get the hardware queue position index */
bdd4ddde
JG
1652 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1653 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1654
fcfb1f77
ML
1655 /* Process new responses since the last time we looked */
1656 while (in_index != pp->resp_idx) {
6c1153e0 1657 unsigned int tag;
fcfb1f77 1658 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
bdd4ddde 1659
fcfb1f77 1660 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
bdd4ddde 1661
fcfb1f77
ML
1662 if (IS_GEN_I(hpriv)) {
1663 /* 50xx: no NCQ, only one command active at a time */
9af5c9c9 1664 tag = ap->link.active_tag;
fcfb1f77
ML
1665 } else {
1666 /* Gen II/IIE: get command tag from CRPB entry */
1667 tag = le16_to_cpu(response->id) & 0x1f;
bdd4ddde 1668 }
fcfb1f77 1669 mv_process_crpb_response(ap, response, tag, ncq_enabled);
bdd4ddde 1670 work_done = true;
bdd4ddde
JG
1671 }
1672
352fab70 1673 /* Update the software queue position index in hardware */
bdd4ddde
JG
1674 if (work_done)
1675 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
fcfb1f77 1676 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
bdd4ddde 1677 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
20f733e7
BR
1678}
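/*
 * Editorial sketch: the response queue is a power-of-two ring, so the
 * hardware in-pointer and our cached resp_idx wrap with the same mask
 * and a simple inequality detects new entries.  Hypothetical helper.
 */
static inline unsigned int mv_next_resp_idx(unsigned int idx)
{
	return (idx + 1) & MV_MAX_Q_DEPTH_MASK;	/* stay inside the ring */
}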
1679
05b308e1
BR
1680/**
1681 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 1682 * @host: host specific structure
05b308e1
BR
1683 * @relevant: port error bits relevant to this host controller
1684 * @hc: which host controller we're to look at
1685 *
1686 * Read, then write to clear, the HC interrupt status; then walk each
1687 * port connected to the HC and see if it needs servicing. Port
1688 * success ints are reported in the HC interrupt status reg, the
1689 * port error ints are reported in the higher level main
1690 * interrupt status register and thus are passed in via the
1691 * 'relevant' argument.
1692 *
1693 * LOCKING:
1694 * Inherited from caller.
1695 */
cca3974e 1696static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
20f733e7 1697{
f351b2d6
SB
1698 struct mv_host_priv *hpriv = host->private_data;
1699 void __iomem *mmio = hpriv->base;
20f733e7 1700 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
20f733e7 1701 u32 hc_irq_cause;
f351b2d6 1702 int port, port0, last_port;
20f733e7 1703
35177265 1704 if (hc == 0)
20f733e7 1705 port0 = 0;
35177265 1706 else
20f733e7 1707 port0 = MV_PORTS_PER_HC;
20f733e7 1708
f351b2d6
SB
1709 if (HAS_PCI(host))
1710 last_port = port0 + MV_PORTS_PER_HC;
1711 else
1712 last_port = port0 + hpriv->n_ports;
20f733e7
BR
1713 /* we'll need the HC success int register in most cases */
1714 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
bdd4ddde
JG
1715 if (!hc_irq_cause)
1716 return;
1717
1718 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
1719
1720 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
2dcb407e 1721 hc, relevant, hc_irq_cause);
20f733e7 1722
8f71efe2 1723 for (port = port0; port < last_port; port++) {
cca3974e 1724 struct ata_port *ap = host->ports[port];
8f71efe2 1725 struct mv_port_priv *pp;
352fab70 1726 int have_err_bits, hardport, shift;
55d8ca4f 1727
bdd4ddde 1728 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
a2c91a88
JG
1729 continue;
1730
8f71efe2
YL
1731 pp = ap->private_data;
1732
31961943 1733 shift = port << 1; /* (port * 2) */
e12bef50 1734 if (port >= MV_PORTS_PER_HC)
20f733e7 1735 shift++; /* skip bit 8 in the HC Main IRQ reg */
e12bef50 1736
352fab70 1737 have_err_bits = ((ERR_IRQ << shift) & relevant);
bdd4ddde
JG
1738
1739 if (unlikely(have_err_bits)) {
1740 struct ata_queued_cmd *qc;
8b260248 1741
9af5c9c9 1742 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1743 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1744 continue;
1745
1746 mv_err_intr(ap, qc);
1747 continue;
1748 }
1749
352fab70 1750 hardport = mv_hardport_from_port(port); /* range 0..3 */
bdd4ddde
JG
1751
1752 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
352fab70 1753 if ((DMA_IRQ << hardport) & hc_irq_cause)
fcfb1f77 1754 mv_process_crpb_entries(ap, pp);
bdd4ddde 1755 } else {
352fab70 1756 if ((DEV_IRQ << hardport) & hc_irq_cause)
bdd4ddde 1757 mv_intr_pio(ap);
20f733e7
BR
1758 }
1759 }
1760 VPRINTK("EXIT\n");
1761}
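/*
 * Editorial sketch of the main-cause bit layout assumed above: each
 * port owns a DONE/ERR bit pair, with bit 8 skipped between the first
 * HC's four ports and the rest -- hence the "shift++".  This helper is
 * hypothetical; the real code computes the shift inline.
 */
static inline unsigned int mv_main_irq_shift(unsigned int port)
{
	unsigned int shift = port * 2;		/* two bits per port */

	if (port >= MV_PORTS_PER_HC)
		shift++;			/* skip bit 8 in the main IRQ reg */
	return shift;
}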
1762
bdd4ddde
JG
1763static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1764{
02a121da 1765 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
1766 struct ata_port *ap;
1767 struct ata_queued_cmd *qc;
1768 struct ata_eh_info *ehi;
1769 unsigned int i, err_mask, printed = 0;
1770 u32 err_cause;
1771
02a121da 1772 err_cause = readl(mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1773
1774 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1775 err_cause);
1776
1777 DPRINTK("All regs @ PCI error\n");
1778 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1779
02a121da 1780 writelfl(0, mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1781
1782 for (i = 0; i < host->n_ports; i++) {
1783 ap = host->ports[i];
936fd732 1784 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1785 ehi = &ap->link.eh_info;
bdd4ddde
JG
1786 ata_ehi_clear_desc(ehi);
1787 if (!printed++)
1788 ata_ehi_push_desc(ehi,
1789 "PCI err cause 0x%08x", err_cause);
1790 err_mask = AC_ERR_HOST_BUS;
cf480626 1791 ehi->action = ATA_EH_RESET;
9af5c9c9 1792 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1793 if (qc)
1794 qc->err_mask |= err_mask;
1795 else
1796 ehi->err_mask |= err_mask;
1797
1798 ata_port_freeze(ap);
1799 }
1800 }
1801}
1802
05b308e1 1803/**
c5d3e45a 1804 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1805 * @irq: unused
1806 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1807 *
1808 * Read the read-only register to determine if any host
1809 * controllers have pending interrupts. If so, call lower level
1810 * routine to handle. Also check for PCI errors which are only
1811 * reported here.
1812 *
8b260248 1813 * LOCKING:
cca3974e 1814 * This routine holds the host lock while processing pending
05b308e1
BR
1815 * interrupts.
1816 */
7d12e780 1817static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1818{
cca3974e 1819 struct ata_host *host = dev_instance;
f351b2d6 1820 struct mv_host_priv *hpriv = host->private_data;
20f733e7 1821 unsigned int hc, handled = 0, n_hcs;
f351b2d6 1822 void __iomem *mmio = hpriv->base;
352fab70 1823 u32 main_cause, main_mask;
20f733e7 1824
646a4da5 1825 spin_lock(&host->lock);
352fab70
ML
1826 main_cause = readl(hpriv->main_cause_reg_addr);
1827 main_mask = readl(hpriv->main_mask_reg_addr);
1828 /*
1829 * Deal with cases where we either have nothing pending, or have read
1830 * a bogus register value which can indicate HW removal or PCI fault.
20f733e7 1831 */
352fab70 1832 if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
646a4da5 1833 goto out_unlock;
20f733e7 1834
cca3974e 1835 n_hcs = mv_get_hc_count(host->ports[0]->flags);
20f733e7 1836
352fab70 1837 if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
bdd4ddde
JG
1838 mv_pci_error(host, mmio);
1839 handled = 1;
1840 goto out_unlock; /* skip all other HC irq handling */
1841 }
1842
20f733e7 1843 for (hc = 0; hc < n_hcs; hc++) {
352fab70 1844 u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
20f733e7 1845 if (relevant) {
cca3974e 1846 mv_host_intr(host, relevant, hc);
bdd4ddde 1847 handled = 1;
20f733e7
BR
1848 }
1849 }
615ab953 1850
bdd4ddde 1851out_unlock:
cca3974e 1852 spin_unlock(&host->lock);
20f733e7
BR
1853 return IRQ_RETVAL(handled);
1854}
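/*
 * Editorial note: reading all-ones from MMIO space usually means the
 * device has dropped off the bus (hot unplug or a PCI fault), so
 * mv_interrupt() above treats 0xffffffff as "nothing pending" instead
 * of dispatching on bogus cause bits.
 */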
1855
c9d39130
JG
1856static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1857{
1858 unsigned int ofs;
1859
1860 switch (sc_reg_in) {
1861 case SCR_STATUS:
1862 case SCR_ERROR:
1863 case SCR_CONTROL:
1864 ofs = sc_reg_in * sizeof(u32);
1865 break;
1866 default:
1867 ofs = 0xffffffffU;
1868 break;
1869 }
1870 return ofs;
1871}
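/*
 * Editorial note: on these 5xxx parts the SCR registers occupy
 * consecutive 32-bit slots in the PHY block, so sc_reg_in * 4 maps
 * SCR_STATUS to 0x0, SCR_ERROR to 0x4, and SCR_CONTROL to 0x8.
 */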
1872
da3dbb17 1873static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1874{
f351b2d6
SB
1875 struct mv_host_priv *hpriv = ap->host->private_data;
1876 void __iomem *mmio = hpriv->base;
0d5ff566 1877 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1878 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1879
da3dbb17
TH
1880 if (ofs != 0xffffffffU) {
1881 *val = readl(addr + ofs);
1882 return 0;
1883 } else
1884 return -EINVAL;
c9d39130
JG
1885}
1886
da3dbb17 1887static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1888{
f351b2d6
SB
1889 struct mv_host_priv *hpriv = ap->host->private_data;
1890 void __iomem *mmio = hpriv->base;
0d5ff566 1891 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1892 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1893
da3dbb17 1894 if (ofs != 0xffffffffU) {
0d5ff566 1895 writelfl(val, addr + ofs);
da3dbb17
TH
1896 return 0;
1897 } else
1898 return -EINVAL;
c9d39130
JG
1899}
1900
7bb3c529 1901static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 1902{
7bb3c529 1903 struct pci_dev *pdev = to_pci_dev(host->dev);
522479fb
JG
1904 int early_5080;
1905
44c10138 1906 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1907
1908 if (!early_5080) {
1909 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1910 tmp |= (1 << 0);
1911 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1912 }
1913
7bb3c529 1914 mv_reset_pci_bus(host, mmio);
522479fb
JG
1915}
1916
1917static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1918{
1919 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1920}
1921
47c2b677 1922static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1923 void __iomem *mmio)
1924{
c9d39130
JG
1925 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1926 u32 tmp;
1927
1928 tmp = readl(phy_mmio + MV5_PHY_MODE);
1929
1930 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1931 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1932}
1933
47c2b677 1934static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1935{
522479fb
JG
1936 u32 tmp;
1937
1938 writel(0, mmio + MV_GPIO_PORT_CTL);
1939
1940 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1941
1942 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1943 tmp |= ~(1 << 0);
1944 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1945}
1946
2a47ce06
JG
1947static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1948 unsigned int port)
bca1c4eb 1949{
c9d39130
JG
1950 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1951 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1952 u32 tmp;
1953 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1954
1955 if (fix_apm_sq) {
1956 tmp = readl(phy_mmio + MV5_LT_MODE);
1957 tmp |= (1 << 19);
1958 writel(tmp, phy_mmio + MV5_LT_MODE);
1959
1960 tmp = readl(phy_mmio + MV5_PHY_CTL);
1961 tmp &= ~0x3;
1962 tmp |= 0x1;
1963 writel(tmp, phy_mmio + MV5_PHY_CTL);
1964 }
1965
1966 tmp = readl(phy_mmio + MV5_PHY_MODE);
1967 tmp &= ~mask;
1968 tmp |= hpriv->signal[port].pre;
1969 tmp |= hpriv->signal[port].amps;
1970 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1971}
1972
c9d39130
JG
1973
1974#undef ZERO
1975#define ZERO(reg) writel(0, port_mmio + (reg))
1976static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1977 unsigned int port)
1978{
1979 void __iomem *port_mmio = mv_port_base(mmio, port);
1980
b562468c
ML
1981 /*
1982 * The datasheet warns against setting ATA_RST when EDMA is active
1983 * (but doesn't say what the problem might be). So we first try
1984 * to disable the EDMA engine before doing the ATA_RST operation.
1985 */
e12bef50 1986 mv_reset_channel(hpriv, mmio, port);
c9d39130
JG
1987
1988 ZERO(0x028); /* command */
1989 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1990 ZERO(0x004); /* timer */
1991 ZERO(0x008); /* irq err cause */
1992 ZERO(0x00c); /* irq err mask */
1993 ZERO(0x010); /* rq bah */
1994 ZERO(0x014); /* rq inp */
1995 ZERO(0x018); /* rq outp */
1996 ZERO(0x01c); /* respq bah */
1997 ZERO(0x024); /* respq outp */
1998 ZERO(0x020); /* respq inp */
1999 ZERO(0x02c); /* test control */
2000 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2001}
2002#undef ZERO
2003
2004#define ZERO(reg) writel(0, hc_mmio + (reg))
2005static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2006 unsigned int hc)
47c2b677 2007{
c9d39130
JG
2008 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2009 u32 tmp;
2010
2011 ZERO(0x00c);
2012 ZERO(0x010);
2013 ZERO(0x014);
2014 ZERO(0x018);
2015
2016 tmp = readl(hc_mmio + 0x20);
2017 tmp &= 0x1c1c1c1c;
2018 tmp |= 0x03030303;
2019 writel(tmp, hc_mmio + 0x20);
2020}
2021#undef ZERO
2022
2023static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2024 unsigned int n_hc)
2025{
2026 unsigned int hc, port;
2027
2028 for (hc = 0; hc < n_hc; hc++) {
2029 for (port = 0; port < MV_PORTS_PER_HC; port++)
2030 mv5_reset_hc_port(hpriv, mmio,
2031 (hc * MV_PORTS_PER_HC) + port);
2032
2033 mv5_reset_one_hc(hpriv, mmio, hc);
2034 }
2035
2036 return 0;
47c2b677
JG
2037}
2038
101ffae2
JG
2039#undef ZERO
2040#define ZERO(reg) writel(0, mmio + (reg))
7bb3c529 2041static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
101ffae2 2042{
02a121da 2043 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
2044 u32 tmp;
2045
2046 tmp = readl(mmio + MV_PCI_MODE);
2047 tmp &= 0xff00ffff;
2048 writel(tmp, mmio + MV_PCI_MODE);
2049
2050 ZERO(MV_PCI_DISC_TIMER);
2051 ZERO(MV_PCI_MSI_TRIGGER);
2052 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2053 ZERO(HC_MAIN_IRQ_MASK_OFS);
2054 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
2055 ZERO(hpriv->irq_cause_ofs);
2056 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
2057 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2058 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2059 ZERO(MV_PCI_ERR_ATTRIBUTE);
2060 ZERO(MV_PCI_ERR_COMMAND);
2061}
2062#undef ZERO
2063
2064static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2065{
2066 u32 tmp;
2067
2068 mv5_reset_flash(hpriv, mmio);
2069
2070 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2071 tmp &= 0x3;
2072 tmp |= (1 << 5) | (1 << 6);
2073 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2074}
2075
2076/**
2077 * mv6_reset_hc - Perform the 6xxx global soft reset
2078 * @mmio: base address of the HBA
2079 *
2080 * This routine only applies to 6xxx parts.
2081 *
2082 * LOCKING:
2083 * Inherited from caller.
2084 */
c9d39130
JG
2085static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2086 unsigned int n_hc)
101ffae2
JG
2087{
2088 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2089 int i, rc = 0;
2090 u32 t;
2091
2092 /* Following procedure defined in PCI "main command and status
2093 * register" table.
2094 */
2095 t = readl(reg);
2096 writel(t | STOP_PCI_MASTER, reg);
2097
2098 for (i = 0; i < 1000; i++) {
2099 udelay(1);
2100 t = readl(reg);
2dcb407e 2101 if (PCI_MASTER_EMPTY & t)
101ffae2 2102 break;
101ffae2
JG
2103 }
2104 if (!(PCI_MASTER_EMPTY & t)) {
2105 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2106 rc = 1;
2107 goto done;
2108 }
2109
2110 /* set reset */
2111 i = 5;
2112 do {
2113 writel(t | GLOB_SFT_RST, reg);
2114 t = readl(reg);
2115 udelay(1);
2116 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2117
2118 if (!(GLOB_SFT_RST & t)) {
2119 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2120 rc = 1;
2121 goto done;
2122 }
2123
2124 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2125 i = 5;
2126 do {
2127 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2128 t = readl(reg);
2129 udelay(1);
2130 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2131
2132 if (GLOB_SFT_RST & t) {
2133 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2134 rc = 1;
2135 }
094e50b2
ML
2136 /*
2137 * Temporary: wait 3 seconds before port-probing can happen,
2138 * so that we don't miss finding sleepy SilXXXX port-multipliers.
2139 * This can go away once hotplug is fully/correctly implemented.
2140 */
2141 if (rc == 0)
2142 msleep(3000);
101ffae2
JG
2143done:
2144 return rc;
2145}
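/*
 * Editorial sketch of the bounded-poll idiom used twice above: write a
 * bit, then re-read with short delays until it reaches the wanted state
 * or the retry budget runs out.  Entirely hypothetical helper.
 */
static bool mv_poll_bit(void __iomem *reg, u32 bit, bool want_set, int tries)
{
	while (tries-- > 0) {
		u32 t = readl(reg);

		if (!!(t & bit) == want_set)
			return true;
		udelay(1);
	}
	return false;
}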
2146
47c2b677 2147static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2148 void __iomem *mmio)
2149{
2150 void __iomem *port_mmio;
2151 u32 tmp;
2152
ba3fe8fb
JG
2153 tmp = readl(mmio + MV_RESET_CFG);
2154 if ((tmp & (1 << 0)) == 0) {
47c2b677 2155 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2156 hpriv->signal[idx].pre = 0x1 << 5;
2157 return;
2158 }
2159
2160 port_mmio = mv_port_base(mmio, idx);
2161 tmp = readl(port_mmio + PHY_MODE2);
2162
2163 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2164 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2165}
2166
47c2b677 2167static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2168{
47c2b677 2169 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2170}
2171
c9d39130 2172static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2173 unsigned int port)
bca1c4eb 2174{
c9d39130
JG
2175 void __iomem *port_mmio = mv_port_base(mmio, port);
2176
bca1c4eb 2177 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2178 int fix_phy_mode2 =
2179 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2180 int fix_phy_mode4 =
47c2b677
JG
2181 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2182 u32 m2, tmp;
2183
2184 if (fix_phy_mode2) {
2185 m2 = readl(port_mmio + PHY_MODE2);
2186 m2 &= ~(1 << 16);
2187 m2 |= (1 << 31);
2188 writel(m2, port_mmio + PHY_MODE2);
2189
2190 udelay(200);
2191
2192 m2 = readl(port_mmio + PHY_MODE2);
2193 m2 &= ~((1 << 16) | (1 << 31));
2194 writel(m2, port_mmio + PHY_MODE2);
2195
2196 udelay(200);
2197 }
2198
2199 /* who knows what this magic does */
2200 tmp = readl(port_mmio + PHY_MODE3);
2201 tmp &= ~0x7F800000;
2202 tmp |= 0x2A800000;
2203 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2204
2205 if (fix_phy_mode4) {
47c2b677 2206 u32 m4;
bca1c4eb
JG
2207
2208 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2209
2210 if (hp_flags & MV_HP_ERRATA_60X1B2)
e12bef50 2211 tmp = readl(port_mmio + PHY_MODE3);
bca1c4eb 2212
e12bef50 2213 /* workaround for errata FEr SATA#10 (part 1) */
bca1c4eb
JG
2214 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2215
2216 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2217
2218 if (hp_flags & MV_HP_ERRATA_60X1B2)
e12bef50 2219 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2220 }
2221
2222 /* Revert values of pre-emphasis and signal amps to the saved ones */
2223 m2 = readl(port_mmio + PHY_MODE2);
2224
2225 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2226 m2 |= hpriv->signal[port].amps;
2227 m2 |= hpriv->signal[port].pre;
47c2b677 2228 m2 &= ~(1 << 16);
bca1c4eb 2229
e4e7b892
JG
2230 /* according to mvSata 3.6.1, some IIE values are fixed */
2231 if (IS_GEN_IIE(hpriv)) {
2232 m2 &= ~0xC30FF01F;
2233 m2 |= 0x0000900F;
2234 }
2235
bca1c4eb
JG
2236 writel(m2, port_mmio + PHY_MODE2);
2237}
2238
f351b2d6
SB
2239/* TODO: use the generic LED interface to configure the SATA Presence */
2240/* & Activity LEDs on the board */
2241static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2242 void __iomem *mmio)
2243{
2244 return;
2245}
2246
2247static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2248 void __iomem *mmio)
2249{
2250 void __iomem *port_mmio;
2251 u32 tmp;
2252
2253 port_mmio = mv_port_base(mmio, idx);
2254 tmp = readl(port_mmio + PHY_MODE2);
2255
2256 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2257 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2258}
2259
2260#undef ZERO
2261#define ZERO(reg) writel(0, port_mmio + (reg))
2262static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2263 void __iomem *mmio, unsigned int port)
2264{
2265 void __iomem *port_mmio = mv_port_base(mmio, port);
2266
b562468c
ML
2267 /*
2268 * The datasheet warns against setting ATA_RST when EDMA is active
2269 * (but doesn't say what the problem might be). So we first try
2270 * to disable the EDMA engine before doing the ATA_RST operation.
2271 */
e12bef50 2272 mv_reset_channel(hpriv, mmio, port);
f351b2d6
SB
2273
2274 ZERO(0x028); /* command */
2275 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2276 ZERO(0x004); /* timer */
2277 ZERO(0x008); /* irq err cause */
2278 ZERO(0x00c); /* irq err mask */
2279 ZERO(0x010); /* rq bah */
2280 ZERO(0x014); /* rq inp */
2281 ZERO(0x018); /* rq outp */
2282 ZERO(0x01c); /* respq bah */
2283 ZERO(0x024); /* respq outp */
2284 ZERO(0x020); /* respq inp */
2285 ZERO(0x02c); /* test control */
2286 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2287}
2288
2289#undef ZERO
2290
2291#define ZERO(reg) writel(0, hc_mmio + (reg))
2292static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2293 void __iomem *mmio)
2294{
2295 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2296
2297 ZERO(0x00c);
2298 ZERO(0x010);
2299 ZERO(0x014);
2300
2301}
2302
2303#undef ZERO
2304
2305static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2306 void __iomem *mmio, unsigned int n_hc)
2307{
2308 unsigned int port;
2309
2310 for (port = 0; port < hpriv->n_ports; port++)
2311 mv_soc_reset_hc_port(hpriv, mmio, port);
2312
2313 mv_soc_reset_one_hc(hpriv, mmio);
2314
2315 return 0;
2316}
2317
2318static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2319 void __iomem *mmio)
2320{
2321 return;
2322}
2323
2324static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2325{
2326 return;
2327}
2328
b67a1064
ML
2329static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2330{
2331 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2332
2333 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2334 if (want_gen2i)
2335 ifctl |= (1 << 7); /* enable gen2i speed */
2336 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2337}
2338
b562468c
ML
2339/*
2340 * Caller must ensure that EDMA is not active,
2341 * by first doing mv_stop_edma() where needed.
2342 */
e12bef50 2343static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
c9d39130
JG
2344 unsigned int port_no)
2345{
2346 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2347
0d8be5cb 2348 mv_stop_edma_engine(port_mmio);
c9d39130
JG
2349 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2350
b67a1064
ML
2351 if (!IS_GEN_I(hpriv)) {
2352 /* Enable 3.0gb/s link speed */
2353 mv_setup_ifctl(port_mmio, 1);
c9d39130 2354 }
b67a1064
ML
2355 /*
2356 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2357 * link, and physical layers. It resets all SATA interface registers
2358 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
c9d39130 2359 */
b67a1064
ML
2360 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2361 udelay(25); /* allow reset propagation */
c9d39130
JG
2362 writelfl(0, port_mmio + EDMA_CMD_OFS);
2363
2364 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2365
ee9ccdf7 2366 if (IS_GEN_I(hpriv))
c9d39130
JG
2367 mdelay(1);
2368}
2369
e49856d8 2370static void mv_pmp_select(struct ata_port *ap, int pmp)
20f733e7 2371{
e49856d8
ML
2372 if (sata_pmp_supported(ap)) {
2373 void __iomem *port_mmio = mv_ap_base(ap);
2374 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2375 int old = reg & 0xf;
22374677 2376
e49856d8
ML
2377 if (old != pmp) {
2378 reg = (reg & ~0xf) | pmp;
2379 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2380 }
22374677 2381 }
20f733e7
BR
2382}
2383
e49856d8
ML
2384static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2385 unsigned long deadline)
22374677 2386{
e49856d8
ML
2387 mv_pmp_select(link->ap, sata_srst_pmp(link));
2388 return sata_std_hardreset(link, class, deadline);
2389}
bdd4ddde 2390
e49856d8
ML
2391static int mv_softreset(struct ata_link *link, unsigned int *class,
2392 unsigned long deadline)
2393{
2394 mv_pmp_select(link->ap, sata_srst_pmp(link));
2395 return ata_sff_softreset(link, class, deadline);
22374677
JG
2396}
2397
cc0680a5 2398static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2399 unsigned long deadline)
31961943 2400{
cc0680a5 2401 struct ata_port *ap = link->ap;
bdd4ddde 2402 struct mv_host_priv *hpriv = ap->host->private_data;
b562468c 2403 struct mv_port_priv *pp = ap->private_data;
f351b2d6 2404 void __iomem *mmio = hpriv->base;
0d8be5cb
ML
2405 int rc, attempts = 0, extra = 0;
2406 u32 sstatus;
2407 bool online;
31961943 2408
e12bef50 2409 mv_reset_channel(hpriv, mmio, ap->port_no);
b562468c 2410 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
bdd4ddde 2411
0d8be5cb
ML
2412 /* Workaround for errata FEr SATA#10 (part 2) */
2413 do {
17c5aab5
ML
2414 const unsigned long *timing =
2415 sata_ehc_deb_timing(&link->eh_context);
bdd4ddde 2416
17c5aab5
ML
2417 rc = sata_link_hardreset(link, timing, deadline + extra,
2418 &online, NULL);
2419 if (rc)
0d8be5cb 2420 return rc;
0d8be5cb
ML
2421 sata_scr_read(link, SCR_STATUS, &sstatus);
2422 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2423 /* Force 1.5gb/s link speed and try again */
2424 mv_setup_ifctl(mv_ap_base(ap), 0);
2425 if (time_after(jiffies + HZ, deadline))
2426 extra = HZ; /* only extend it once, max */
2427 }
2428 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
bdd4ddde 2429
17c5aab5 2430 return rc;
bdd4ddde
JG
2431}
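/*
 * Editorial sketch: the SStatus values tested above decode per the
 * SATA spec as nibbles, DET in bits 3:0 and SPD in 7:4.  0x113 is
 * "device present, Gen-1, active", 0x123 the Gen-2 equivalent, and
 * 0x121 "presence detected but no communication" -- which is why the
 * loop above drops to 1.5Gb/s after repeated attempts.
 */
static inline u32 mv_sstatus_det(u32 sstatus)
{
	return sstatus & 0xf;		/* device detection */
}

static inline u32 mv_sstatus_spd(u32 sstatus)
{
	return (sstatus >> 4) & 0xf;	/* negotiated link speed */
}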
2432
bdd4ddde
JG
2433static void mv_eh_freeze(struct ata_port *ap)
2434{
f351b2d6 2435 struct mv_host_priv *hpriv = ap->host->private_data;
1cfd19ae 2436 unsigned int shift, hardport, port = ap->port_no;
352fab70 2437 u32 main_mask;
bdd4ddde
JG
2438
2439 /* FIXME: handle coalescing completion events properly */
2440
1cfd19ae
ML
2441 mv_stop_edma(ap);
2442 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
bdd4ddde 2443
bdd4ddde 2444 /* disable assertion of portN err, done events */
352fab70
ML
2445 main_mask = readl(hpriv->main_mask_reg_addr);
2446 main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
2447 writelfl(main_mask, hpriv->main_mask_reg_addr);
bdd4ddde
JG
2448}
2449
2450static void mv_eh_thaw(struct ata_port *ap)
2451{
f351b2d6 2452 struct mv_host_priv *hpriv = ap->host->private_data;
1cfd19ae
ML
2453 unsigned int shift, hardport, port = ap->port_no;
2454 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
bdd4ddde 2455 void __iomem *port_mmio = mv_ap_base(ap);
352fab70 2456 u32 main_mask, hc_irq_cause;
bdd4ddde
JG
2457
2458 /* FIXME: handle coalescing completion events properly */
2459
1cfd19ae 2460 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
bdd4ddde 2461
bdd4ddde
JG
2462 /* clear EDMA errors on this port */
2463 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2464
2465 /* clear pending irq events */
2466 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1cfd19ae
ML
2467 hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
2468 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
bdd4ddde
JG
2469
2470 /* enable assertion of portN err, done events */
352fab70
ML
2471 main_mask = readl(hpriv->main_mask_reg_addr);
2472 main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
2473 writelfl(main_mask, hpriv->main_mask_reg_addr);
31961943
BR
2474}
2475
05b308e1
BR
2476/**
2477 * mv_port_init - Perform some early initialization on a single port.
2478 * @port: libata data structure storing shadow register addresses
2479 * @port_mmio: base address of the port
2480 *
2481 * Initialize shadow register mmio addresses, clear outstanding
2482 * interrupts on the port, and unmask interrupts for the future
2483 * start of the port.
2484 *
2485 * LOCKING:
2486 * Inherited from caller.
2487 */
31961943 2488static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2489{
0d5ff566 2490 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2491 unsigned serr_ofs;
2492
8b260248 2493 /* PIO related setup
31961943
BR
2494 */
2495 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2496 port->error_addr =
31961943
BR
2497 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2498 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2499 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2500 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2501 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2502 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2503 port->status_addr =
31961943
BR
2504 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2505 /* special case: control/altstatus doesn't have ATA_REG_ address */
2506 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2507
2508 /* unused: */
8d9db2d2 2509 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2510
31961943
BR
2511 /* Clear any currently outstanding port interrupt conditions */
2512 serr_ofs = mv_scr_offset(SCR_ERROR);
2513 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2514 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2515
646a4da5
ML
2516 /* unmask all non-transient EDMA error interrupts */
2517 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2518
8b260248 2519 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2520 readl(port_mmio + EDMA_CFG_OFS),
2521 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2522 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2523}
2524
4447d351 2525static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2526{
4447d351
TH
2527 struct pci_dev *pdev = to_pci_dev(host->dev);
2528 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2529 u32 hp_flags = hpriv->hp_flags;
2530
5796d1c4 2531 switch (board_idx) {
47c2b677
JG
2532 case chip_5080:
2533 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2534 hp_flags |= MV_HP_GEN_I;
47c2b677 2535
44c10138 2536 switch (pdev->revision) {
47c2b677
JG
2537 case 0x1:
2538 hp_flags |= MV_HP_ERRATA_50XXB0;
2539 break;
2540 case 0x3:
2541 hp_flags |= MV_HP_ERRATA_50XXB2;
2542 break;
2543 default:
2544 dev_printk(KERN_WARNING, &pdev->dev,
2545 "Applying 50XXB2 workarounds to unknown rev\n");
2546 hp_flags |= MV_HP_ERRATA_50XXB2;
2547 break;
2548 }
2549 break;
2550
bca1c4eb
JG
2551 case chip_504x:
2552 case chip_508x:
47c2b677 2553 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2554 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2555
44c10138 2556 switch (pdev->revision) {
47c2b677
JG
2557 case 0x0:
2558 hp_flags |= MV_HP_ERRATA_50XXB0;
2559 break;
2560 case 0x3:
2561 hp_flags |= MV_HP_ERRATA_50XXB2;
2562 break;
2563 default:
2564 dev_printk(KERN_WARNING, &pdev->dev,
2565 "Applying B2 workarounds to unknown rev\n");
2566 hp_flags |= MV_HP_ERRATA_50XXB2;
2567 break;
bca1c4eb
JG
2568 }
2569 break;
2570
2571 case chip_604x:
2572 case chip_608x:
47c2b677 2573 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2574 hp_flags |= MV_HP_GEN_II;
47c2b677 2575
44c10138 2576 switch (pdev->revision) {
47c2b677
JG
2577 case 0x7:
2578 hp_flags |= MV_HP_ERRATA_60X1B2;
2579 break;
2580 case 0x9:
2581 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2582 break;
2583 default:
2584 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2585 "Applying B2 workarounds to unknown rev\n");
2586 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2587 break;
2588 }
2589 break;
2590
e4e7b892 2591 case chip_7042:
02a121da 2592 hp_flags |= MV_HP_PCIE;
306b30f7
ML
2593 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2594 (pdev->device == 0x2300 || pdev->device == 0x2310))
2595 {
4e520033
ML
2596 /*
2597 * Highpoint RocketRAID PCIe 23xx series cards:
2598 *
2599 * Unconfigured drives are treated as "Legacy"
2600 * by the BIOS, and it overwrites sector 8 with
2601 * a "Lgcy" metadata block prior to Linux boot.
2602 *
2603 * Configured drives (RAID or JBOD) leave sector 8
2604 * alone, but instead overwrite a high numbered
2605 * sector for the RAID metadata. This sector can
2606 * be determined exactly, by truncating the physical
2607 * drive capacity to a nice even GB value.
2608 *
2609 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2610 *
2611 * Warn the user, lest they think we're just buggy.
2612 */
2613 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2614 " BIOS CORRUPTS DATA on all attached drives,"
2615 " regardless of if/how they are configured."
2616 " BEWARE!\n");
2617 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2618 " use sectors 8-9 on \"Legacy\" drives,"
2619 " and avoid the final two gigabytes on"
2620 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2621 }
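		/* note: chip_7042 deliberately falls through to the 6042 setup */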
e4e7b892
JG
2622 case chip_6042:
2623 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2624 hp_flags |= MV_HP_GEN_IIE;
2625
44c10138 2626 switch (pdev->revision) {
e4e7b892
JG
2627 case 0x0:
2628 hp_flags |= MV_HP_ERRATA_XX42A0;
2629 break;
2630 case 0x1:
2631 hp_flags |= MV_HP_ERRATA_60X1C0;
2632 break;
2633 default:
2634 dev_printk(KERN_WARNING, &pdev->dev,
2635 "Applying 60X1C0 workarounds to unknown rev\n");
2636 hp_flags |= MV_HP_ERRATA_60X1C0;
2637 break;
2638 }
2639 break;
f351b2d6
SB
2640 case chip_soc:
2641 hpriv->ops = &mv_soc_ops;
2642 hp_flags |= MV_HP_ERRATA_60X1C0;
2643 break;
e4e7b892 2644
bca1c4eb 2645 default:
f351b2d6 2646 dev_printk(KERN_ERR, host->dev,
5796d1c4 2647 "BUG: invalid board index %u\n", board_idx);
bca1c4eb
JG
2648 return 1;
2649 }
2650
2651 hpriv->hp_flags = hp_flags;
02a121da
ML
2652 if (hp_flags & MV_HP_PCIE) {
2653 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2654 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2655 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2656 } else {
2657 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2658 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2659 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2660 }
bca1c4eb
JG
2661
2662 return 0;
2663}
2664
05b308e1 2665/**
47c2b677 2666 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2667 * @host: ATA host to initialize
2668 * @board_idx: controller index
05b308e1
BR
2669 *
2670 * If possible, do an early global reset of the host. Then do
2671 * our port init and clear/unmask all/relevant host interrupts.
2672 *
2673 * LOCKING:
2674 * Inherited from caller.
2675 */
4447d351 2676static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2677{
2678 int rc = 0, n_hc, port, hc;
4447d351 2679 struct mv_host_priv *hpriv = host->private_data;
f351b2d6 2680 void __iomem *mmio = hpriv->base;
47c2b677 2681
4447d351 2682 rc = mv_chip_id(host, board_idx);
bca1c4eb 2683 if (rc)
352fab70 2684 goto done;
f351b2d6
SB
2685
2686 if (HAS_PCI(host)) {
352fab70
ML
2687 hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
2688 hpriv->main_mask_reg_addr = mmio + HC_MAIN_IRQ_MASK_OFS;
f351b2d6 2689 } else {
352fab70
ML
2690 hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
2691 hpriv->main_mask_reg_addr = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
f351b2d6 2692 }
352fab70
ML
2693
2694 /* global interrupt mask: 0 == mask everything */
f351b2d6 2695 writel(0, hpriv->main_mask_reg_addr);
bca1c4eb 2696
4447d351 2697 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2698
4447d351 2699 for (port = 0; port < host->n_ports; port++)
47c2b677 2700 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2701
c9d39130 2702 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2703 if (rc)
20f733e7 2704 goto done;
20f733e7 2705
522479fb 2706 hpriv->ops->reset_flash(hpriv, mmio);
7bb3c529 2707 hpriv->ops->reset_bus(host, mmio);
47c2b677 2708 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2709
4447d351 2710 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2711 struct ata_port *ap = host->ports[port];
2a47ce06 2712 void __iomem *port_mmio = mv_port_base(mmio, port);
cbcdd875
TH
2713
2714 mv_port_init(&ap->ioaddr, port_mmio);
2715
7bb3c529 2716#ifdef CONFIG_PCI
f351b2d6
SB
2717 if (HAS_PCI(host)) {
2718 unsigned int offset = port_mmio - mmio;
2719 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2720 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2721 }
7bb3c529 2722#endif
20f733e7
BR
2723 }
2724
2725 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2726 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2727
2728 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2729 "(before clear)=0x%08x\n", hc,
2730 readl(hc_mmio + HC_CFG_OFS),
2731 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2732
2733 /* Clear any currently outstanding hc interrupt conditions */
2734 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2735 }
2736
f351b2d6
SB
2737 if (HAS_PCI(host)) {
2738 /* Clear any currently outstanding host interrupt conditions */
2739 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943 2740
f351b2d6
SB
2741 /* and unmask interrupt generation for host regs */
2742 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2743 if (IS_GEN_I(hpriv))
2744 writelfl(~HC_MAIN_MASKED_IRQS_5,
2745 hpriv->main_mask_reg_addr);
2746 else
2747 writelfl(~HC_MAIN_MASKED_IRQS,
2748 hpriv->main_mask_reg_addr);
2749
2750 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2751 "PCI int cause/mask=0x%08x/0x%08x\n",
2752 readl(hpriv->main_cause_reg_addr),
2753 readl(hpriv->main_mask_reg_addr),
2754 readl(mmio + hpriv->irq_cause_ofs),
2755 readl(mmio + hpriv->irq_mask_ofs));
2756 } else {
2757 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2758 hpriv->main_mask_reg_addr);
2759 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2760 readl(hpriv->main_cause_reg_addr),
2761 readl(hpriv->main_mask_reg_addr));
2762 }
2763done:
2764 return rc;
2765}
fb621e2f 2766
fbf14e2f
BB
2767static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2768{
2769 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2770 MV_CRQB_Q_SZ, 0);
2771 if (!hpriv->crqb_pool)
2772 return -ENOMEM;
2773
2774 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2775 MV_CRPB_Q_SZ, 0);
2776 if (!hpriv->crpb_pool)
2777 return -ENOMEM;
2778
2779 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2780 MV_SG_TBL_SZ, 0);
2781 if (!hpriv->sg_tbl_pool)
2782 return -ENOMEM;
2783
2784 return 0;
2785}
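/*
 * Editorial usage sketch (hypothetical helper): each port later carves
 * its per-port rings out of these managed pools, in the style of
 * mv_port_start() elsewhere in this file.  dma_pool_alloc() returns
 * the CPU address and fills in the bus address for the EDMA engine.
 */
static void *mv_pool_alloc_sketch(struct dma_pool *pool, dma_addr_t *dma)
{
	return dma_pool_alloc(pool, GFP_KERNEL, dma);
}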
2786
15a32632
LB
2787static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2788 struct mbus_dram_target_info *dram)
2789{
2790 int i;
2791
2792 for (i = 0; i < 4; i++) {
2793 writel(0, hpriv->base + WINDOW_CTRL(i));
2794 writel(0, hpriv->base + WINDOW_BASE(i));
2795 }
2796
2797 for (i = 0; i < dram->num_cs; i++) {
2798 struct mbus_dram_window *cs = dram->cs + i;
2799
2800 writel(((cs->size - 1) & 0xffff0000) |
2801 (cs->mbus_attr << 8) |
2802 (dram->mbus_dram_target_id << 4) | 1,
2803 hpriv->base + WINDOW_CTRL(i));
2804 writel(cs->base, hpriv->base + WINDOW_BASE(i));
2805 }
2806}
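/*
 * Editorial sketch of the window-control encoding written above,
 * inferred from the code rather than a datasheet: size mask in bits
 * 31:16, mbus attribute in 15:8, target id in 7:4, enable in bit 0.
 * The macro is hypothetical.
 */
#define MV_WIN_CTRL_SKETCH(size, attr, target) \
	((((size) - 1) & 0xffff0000) | ((attr) << 8) | ((target) << 4) | 1)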
2807
f351b2d6
SB
2808/**
2809 * mv_platform_probe - handle a positive probe of an SoC Marvell
2810 * host
2811 * @pdev: platform device found
2812 *
2813 * LOCKING:
2814 * Inherited from caller.
2815 */
2816static int mv_platform_probe(struct platform_device *pdev)
2817{
2818 static int printed_version;
2819 const struct mv_sata_platform_data *mv_platform_data;
2820 const struct ata_port_info *ppi[] =
2821 { &mv_port_info[chip_soc], NULL };
2822 struct ata_host *host;
2823 struct mv_host_priv *hpriv;
2824 struct resource *res;
2825 int n_ports, rc;
20f733e7 2826
f351b2d6
SB
2827 if (!printed_version++)
2828 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
bca1c4eb 2829
f351b2d6
SB
2830 /*
2831 * Simple resource validation ..
2832 */
2833 if (unlikely(pdev->num_resources != 2)) {
2834 dev_err(&pdev->dev, "invalid number of resources\n");
2835 return -EINVAL;
2836 }
2837
2838 /*
2839 * Get the register base first
2840 */
2841 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2842 if (res == NULL)
2843 return -EINVAL;
2844
2845 /* allocate host */
2846 mv_platform_data = pdev->dev.platform_data;
2847 n_ports = mv_platform_data->n_ports;
2848
2849 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2850 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2851
2852 if (!host || !hpriv)
2853 return -ENOMEM;
2854 host->private_data = hpriv;
2855 hpriv->n_ports = n_ports;
2856
2857 host->iomap = NULL;
f1cb0ea1
SB
2858 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2859 res->end - res->start + 1);
f351b2d6
SB
2860 hpriv->base -= MV_SATAHC0_REG_BASE;
2861
15a32632
LB
2862 /*
2863 * (Re-)program MBUS remapping windows if we are asked to.
2864 */
2865 if (mv_platform_data->dram != NULL)
2866 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2867
fbf14e2f
BB
2868 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2869 if (rc)
2870 return rc;
2871
f351b2d6
SB
2872 /* initialize adapter */
2873 rc = mv_init_host(host, chip_soc);
2874 if (rc)
2875 return rc;
2876
2877 dev_printk(KERN_INFO, &pdev->dev,
2878 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2879 host->n_ports);
2880
2881 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2882 IRQF_SHARED, &mv6_sht);
2883}
2884
2885/**
2886 * mv_platform_remove - unplug a platform interface
2888 * @pdev: platform device
2889 *
2890 * A platform bus SATA device has been unplugged. Perform the needed
2891 * cleanup. Also called on module unload for any active devices.
2892 */
2893static int __devexit mv_platform_remove(struct platform_device *pdev)
2894{
2895 struct device *dev = &pdev->dev;
2896 struct ata_host *host = dev_get_drvdata(dev);
f351b2d6
SB
2897
2898 ata_host_detach(host);
f351b2d6 2899 return 0;
20f733e7
BR
2900}
2901
f351b2d6
SB
2902static struct platform_driver mv_platform_driver = {
2903 .probe = mv_platform_probe,
2904 .remove = __devexit_p(mv_platform_remove),
2905 .driver = {
2906 .name = DRV_NAME,
2907 .owner = THIS_MODULE,
2908 },
2909};
2910
2911
7bb3c529 2912#ifdef CONFIG_PCI
f351b2d6
SB
2913static int mv_pci_init_one(struct pci_dev *pdev,
2914 const struct pci_device_id *ent);
2915
7bb3c529
SB
2916
2917static struct pci_driver mv_pci_driver = {
2918 .name = DRV_NAME,
2919 .id_table = mv_pci_tbl,
f351b2d6 2920 .probe = mv_pci_init_one,
7bb3c529
SB
2921 .remove = ata_pci_remove_one,
2922};
2923
2924/*
2925 * module options
2926 */
2927static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2928
2929
2930/* move to PCI layer or libata core? */
2931static int pci_go_64(struct pci_dev *pdev)
2932{
2933 int rc;
2934
2935 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2936 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2937 if (rc) {
2938 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2939 if (rc) {
2940 dev_printk(KERN_ERR, &pdev->dev,
2941 "64-bit DMA enable failed\n");
2942 return rc;
2943 }
2944 }
2945 } else {
2946 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2947 if (rc) {
2948 dev_printk(KERN_ERR, &pdev->dev,
2949 "32-bit DMA enable failed\n");
2950 return rc;
2951 }
2952 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2953 if (rc) {
2954 dev_printk(KERN_ERR, &pdev->dev,
2955 "32-bit consistent DMA enable failed\n");
2956 return rc;
2957 }
2958 }
2959
2960 return rc;
2961}
2962
05b308e1
BR
2963/**
2964 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2965 * @host: ATA host to print info about
05b308e1
BR
2966 *
2967 * FIXME: complete this.
2968 *
2969 * LOCKING:
2970 * Inherited from caller.
2971 */
4447d351 2972static void mv_print_info(struct ata_host *host)
31961943 2973{
4447d351
TH
2974 struct pci_dev *pdev = to_pci_dev(host->dev);
2975 struct mv_host_priv *hpriv = host->private_data;
44c10138 2976 u8 scc;
c1e4fe71 2977 const char *scc_s, *gen;
31961943
BR
2978
2979 /* Use this to determine the HW stepping of the chip so we know
2980 * what errata to work around
2981 */
31961943
BR
2982 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2983 if (scc == 0)
2984 scc_s = "SCSI";
2985 else if (scc == 0x01)
2986 scc_s = "RAID";
2987 else
c1e4fe71
JG
2988 scc_s = "?";
2989
2990 if (IS_GEN_I(hpriv))
2991 gen = "I";
2992 else if (IS_GEN_II(hpriv))
2993 gen = "II";
2994 else if (IS_GEN_IIE(hpriv))
2995 gen = "IIE";
2996 else
2997 gen = "?";
31961943 2998
a9524a76 2999 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
3000 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3001 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
3002 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3003}
3004
05b308e1 3005/**
f351b2d6 3006 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
05b308e1
BR
3007 * @pdev: PCI device found
3008 * @ent: PCI device ID entry for the matched host
3009 *
3010 * LOCKING:
3011 * Inherited from caller.
3012 */
f351b2d6
SB
3013static int mv_pci_init_one(struct pci_dev *pdev,
3014 const struct pci_device_id *ent)
20f733e7 3015{
2dcb407e 3016 static int printed_version;
20f733e7 3017 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
3018 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3019 struct ata_host *host;
3020 struct mv_host_priv *hpriv;
3021 int n_ports, rc;
20f733e7 3022
a9524a76
JG
3023 if (!printed_version++)
3024 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 3025
4447d351
TH
3026 /* allocate host */
3027 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3028
3029 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3030 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3031 if (!host || !hpriv)
3032 return -ENOMEM;
3033 host->private_data = hpriv;
f351b2d6 3034 hpriv->n_ports = n_ports;
4447d351
TH
3035
3036 /* acquire resources */
24dc5f33
TH
3037 rc = pcim_enable_device(pdev);
3038 if (rc)
20f733e7 3039 return rc;
20f733e7 3040
0d5ff566
TH
3041 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3042 if (rc == -EBUSY)
24dc5f33 3043 pcim_pin_device(pdev);
0d5ff566 3044 if (rc)
24dc5f33 3045 return rc;
4447d351 3046 host->iomap = pcim_iomap_table(pdev);
f351b2d6 3047 hpriv->base = host->iomap[MV_PRIMARY_BAR];
20f733e7 3048
d88184fb
JG
3049 rc = pci_go_64(pdev);
3050 if (rc)
3051 return rc;
3052
da2fa9ba
ML
3053 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3054 if (rc)
3055 return rc;
3056
20f733e7 3057 /* initialize adapter */
4447d351 3058 rc = mv_init_host(host, board_idx);
24dc5f33
TH
3059 if (rc)
3060 return rc;
20f733e7 3061
31961943 3062 /* Enable interrupts */
6a59dcf8 3063 if (msi && pci_enable_msi(pdev))
31961943 3064 pci_intx(pdev, 1);
20f733e7 3065
31961943 3066 mv_dump_pci_cfg(pdev, 0x68);
4447d351 3067 mv_print_info(host);
20f733e7 3068
4447d351 3069 pci_set_master(pdev);
ea8b4db9 3070 pci_try_set_mwi(pdev);
4447d351 3071 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 3072 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7 3073}
7bb3c529 3074#endif
20f733e7 3075
f351b2d6
SB
3076static int mv_platform_probe(struct platform_device *pdev);
3077static int __devexit mv_platform_remove(struct platform_device *pdev);
3078
20f733e7
BR
3079static int __init mv_init(void)
3080{
7bb3c529
SB
3081 int rc = -ENODEV;
3082#ifdef CONFIG_PCI
3083 rc = pci_register_driver(&mv_pci_driver);
f351b2d6
SB
3084 if (rc < 0)
3085 return rc;
3086#endif
3087 rc = platform_driver_register(&mv_platform_driver);
3088
3089#ifdef CONFIG_PCI
3090 if (rc < 0)
3091 pci_unregister_driver(&mv_pci_driver);
7bb3c529
SB
3092#endif
3093 return rc;
20f733e7
BR
3094}
3095
3096static void __exit mv_exit(void)
3097{
7bb3c529 3098#ifdef CONFIG_PCI
20f733e7 3099 pci_unregister_driver(&mv_pci_driver);
7bb3c529 3100#endif
f351b2d6 3101 platform_driver_unregister(&mv_platform_driver);
20f733e7
BR
3102}
3103
3104MODULE_AUTHOR("Brett Russ");
3105MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3106MODULE_LICENSE("GPL");
3107MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3108MODULE_VERSION(DRV_VERSION);
17c5aab5 3109MODULE_ALIAS("platform:" DRV_NAME);
20f733e7 3110
7bb3c529 3111#ifdef CONFIG_PCI
ddef9bb3
JG
3112module_param(msi, int, 0444);
3113MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
7bb3c529 3114#endif
ddef9bb3 3115
20f733e7
BR
3116module_init(mv_init);
3117module_exit(mv_exit);