/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember that a couple of workarounds (one related to
  PCI-X) are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Develop a low-power-consumption strategy, and implement it.

  8) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  9) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead saved by interrupt mitigation is not worth the added
  latency cost.

  10) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME        "sata_mv"
#define DRV_VERSION     "1.20"

enum {
        /* BARs are enumerated in pci_resource_start() terms */
        MV_PRIMARY_BAR = 0,     /* offset 0x10: memory space */
        MV_IO_BAR = 2,          /* offset 0x18: IO space */
        MV_MISC_BAR = 3,        /* offset 0x1c: FLASH, NVRAM, SRAM */

        MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
        MV_MINOR_REG_AREA_SZ = 0x2000,  /* 8KB */

        MV_PCI_REG_BASE = 0,
        MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
        MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
        MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
        MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
        MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
        MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

        MV_SATAHC0_REG_BASE = 0x20000,
        MV_FLASH_CTL = 0x1046c,
        MV_GPIO_PORT_CTL = 0x104f0,
        MV_RESET_CFG = 0x180d8,

        MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,  /* arbiter */
        MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

        MV_MAX_Q_DEPTH = 32,
        MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
         * CRPB needs alignment on a 256B boundary. Size == 256B
         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
         */
        MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
        MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
        MV_MAX_SG_CT = 256,
        MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
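        /*
         * These queue sizes follow directly from the alignment rules
         * above: 32 CRQBs * 32 B = 1 KB for the request ring, and
         * 32 CRPBs * 8 B = 256 B for the response ring.
         */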

        MV_PORTS_PER_HC = 4,
        /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
        MV_PORT_HC_SHIFT = 2,
        /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
        MV_PORT_MASK = 3,

        /* Host Flags */
        MV_FLAG_DUAL_HC = (1 << 30),            /* two SATA Host Controllers */
        MV_FLAG_IRQ_COALESCE = (1 << 29),       /* IRQ coalescing capability */
        /* SoC integrated controllers, no PCI interface */
        MV_FLAG_SOC = (1 << 28),

        MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                          ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
                          ATA_FLAG_PIO_POLLING,
        MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

        CRQB_FLAG_READ = (1 << 0),
        CRQB_TAG_SHIFT = 1,
        CRQB_IOID_SHIFT = 6,            /* CRQB Gen-II/IIE IO Id shift */
        CRQB_HOSTQ_SHIFT = 17,          /* CRQB Gen-II/IIE HostQueTag shift */
        CRQB_CMD_ADDR_SHIFT = 8,
        CRQB_CMD_CS = (0x2 << 11),
        CRQB_CMD_LAST = (1 << 15),

        CRPB_FLAG_STATUS_SHIFT = 8,
        CRPB_IOID_SHIFT_6 = 5,          /* CRPB Gen-II IO Id shift */
        CRPB_IOID_SHIFT_7 = 7,          /* CRPB Gen-IIE IO Id shift */

        EPRD_FLAG_END_OF_TBL = (1 << 31),

        /* PCI interface registers */

        PCI_COMMAND_OFS = 0xc00,

        PCI_MAIN_CMD_STS_OFS = 0xd30,
        STOP_PCI_MASTER = (1 << 2),
        PCI_MASTER_EMPTY = (1 << 3),
        GLOB_SFT_RST = (1 << 4),

        MV_PCI_MODE = 0xd00,
        MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
        MV_PCI_DISC_TIMER = 0xd04,
        MV_PCI_MSI_TRIGGER = 0xc38,
        MV_PCI_SERR_MASK = 0xc28,
        MV_PCI_XBAR_TMOUT = 0x1d04,
        MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
        MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
        MV_PCI_ERR_ATTRIBUTE = 0x1d48,
        MV_PCI_ERR_COMMAND = 0x1d50,

        PCI_IRQ_CAUSE_OFS = 0x1d58,
        PCI_IRQ_MASK_OFS = 0x1d5c,
        PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */

        PCIE_IRQ_CAUSE_OFS = 0x1900,
        PCIE_IRQ_MASK_OFS = 0x1910,
        PCIE_UNMASK_ALL_IRQS = 0x40a,   /* assorted bits */

        HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
        HC_MAIN_IRQ_MASK_OFS = 0x1d64,
        HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
        HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
        PORT0_ERR = (1 << 0),           /* shift by port # */
        PORT0_DONE = (1 << 1),          /* shift by port # */
        HC0_IRQ_PEND = 0x1ff,           /* bits 0-8 = HC0's ports */
        HC_SHIFT = 9,                   /* bits 9-17 = HC1's ports */
        PCI_ERR = (1 << 18),
        TRAN_LO_DONE = (1 << 19),       /* 6xxx: IRQ coalescing */
        TRAN_HI_DONE = (1 << 20),       /* 6xxx: IRQ coalescing */
        PORTS_0_3_COAL_DONE = (1 << 8),
        PORTS_4_7_COAL_DONE = (1 << 17),
        PORTS_0_7_COAL_DONE = (1 << 21),        /* 6xxx: IRQ coalescing */
        GPIO_INT = (1 << 22),
        SELF_INT = (1 << 23),
        TWSI_INT = (1 << 24),
        HC_MAIN_RSVD = (0x7f << 25),            /* bits 31-25 */
        HC_MAIN_RSVD_5 = (0x1fff << 19),        /* bits 31-19 */
        HC_MAIN_RSVD_SOC = (0x3fffffb << 6),    /* bits 31-9, 7-6 */
        HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
                               PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
                               HC_MAIN_RSVD),
        HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
                                 HC_MAIN_RSVD_5),
        HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

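        /*
         * Layout of the per-port bits in the main cause/mask registers:
         * each port owns a PORT0_ERR/PORT0_DONE pair shifted left by
         * 2 * port, plus one extra bit to skip bit 8 (HC0's coalescing
         * status) for ports 4-7 -- see mv_host_intr().  For example,
         * port 5's error bit is bit 11 (5 * 2 + 1).
         */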
        /* SATAHC registers */
        HC_CFG_OFS = 0,

        HC_IRQ_CAUSE_OFS = 0x14,
        CRPB_DMA_DONE = (1 << 0),       /* shift by port # */
        HC_IRQ_COAL = (1 << 4),         /* IRQ coalescing */
        DEV_IRQ = (1 << 8),             /* shift by port # */

        /* Shadow block registers */
        SHD_BLK_OFS = 0x100,
        SHD_CTL_AST_OFS = 0x20,         /* ofs from SHD_BLK_OFS */

        /* SATA registers */
        SATA_STATUS_OFS = 0x300,        /* ctrl, err regs follow status */
        SATA_ACTIVE_OFS = 0x350,
        SATA_FIS_IRQ_CAUSE_OFS = 0x364,
        PHY_MODE3 = 0x310,
        PHY_MODE4 = 0x314,
        PHY_MODE2 = 0x330,
        MV5_PHY_MODE = 0x74,
        MV5_LT_MODE = 0x30,
        MV5_PHY_CTL = 0x0C,
        SATA_INTERFACE_CTL = 0x050,

        MV_M2_PREAMP_MASK = 0x7e0,

        /* Port registers */
        EDMA_CFG_OFS = 0,
        EDMA_CFG_Q_DEPTH = 0x1f,        /* max device queue depth */
        EDMA_CFG_NCQ = (1 << 5),        /* for R/W FPDMA queued */
        EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),     /* continue on error */
        EDMA_CFG_RD_BRST_EXT = (1 << 11),       /* read burst 512B */
        EDMA_CFG_WR_BUFF_LEN = (1 << 13),       /* write buffer 512B */

        EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
        EDMA_ERR_IRQ_MASK_OFS = 0xc,
        EDMA_ERR_D_PAR = (1 << 0),      /* UDMA data parity err */
        EDMA_ERR_PRD_PAR = (1 << 1),    /* UDMA PRD parity err */
        EDMA_ERR_DEV = (1 << 2),        /* device error */
        EDMA_ERR_DEV_DCON = (1 << 3),   /* device disconnect */
        EDMA_ERR_DEV_CON = (1 << 4),    /* device connected */
        EDMA_ERR_SERR = (1 << 5),       /* SError bits [WBDST] raised */
        EDMA_ERR_SELF_DIS = (1 << 7),   /* Gen II/IIE self-disable */
        EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
        EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
        EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
        EDMA_ERR_CRQB_PAR = (1 << 9),   /* CRQB parity error */
        EDMA_ERR_CRPB_PAR = (1 << 10),  /* CRPB parity error */
        EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
        EDMA_ERR_IORDY = (1 << 12),     /* IORdy timeout */

        EDMA_ERR_LNK_CTRL_RX = (0xf << 13),     /* link ctrl rx error */
        EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),     /* transient: CRC err */
        EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),     /* transient: FIFO err */
        EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),     /* fatal: caught SYNC */
        EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),     /* transient: FIS rx err */

        EDMA_ERR_LNK_DATA_RX = (0xf << 17),     /* link data rx error */

        EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),    /* link ctrl tx error */
        EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),     /* transient: CRC err */
        EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),     /* transient: FIFO err */
        EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),     /* transient: caught SYNC */
        EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),     /* transient: caught DMAT */
        EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),     /* transient: FIS collision */

        EDMA_ERR_LNK_DATA_TX = (0x1f << 26),    /* link data tx error */

        EDMA_ERR_TRANS_PROTO = (1 << 31),       /* transport protocol error */
        EDMA_ERR_OVERRUN_5 = (1 << 5),
        EDMA_ERR_UNDERRUN_5 = (1 << 6),

        EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
                                 EDMA_ERR_LNK_CTRL_RX_1 |
                                 EDMA_ERR_LNK_CTRL_RX_3 |
                                 EDMA_ERR_LNK_CTRL_TX,

        EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
                         EDMA_ERR_PRD_PAR |
                         EDMA_ERR_DEV_DCON |
                         EDMA_ERR_DEV_CON |
                         EDMA_ERR_SERR |
                         EDMA_ERR_SELF_DIS |
                         EDMA_ERR_CRQB_PAR |
                         EDMA_ERR_CRPB_PAR |
                         EDMA_ERR_INTRL_PAR |
                         EDMA_ERR_IORDY |
                         EDMA_ERR_LNK_CTRL_RX_2 |
                         EDMA_ERR_LNK_DATA_RX |
                         EDMA_ERR_LNK_DATA_TX |
                         EDMA_ERR_TRANS_PROTO,
        EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
                           EDMA_ERR_PRD_PAR |
                           EDMA_ERR_DEV_DCON |
                           EDMA_ERR_DEV_CON |
                           EDMA_ERR_OVERRUN_5 |
                           EDMA_ERR_UNDERRUN_5 |
                           EDMA_ERR_SELF_DIS_5 |
                           EDMA_ERR_CRQB_PAR |
                           EDMA_ERR_CRPB_PAR |
                           EDMA_ERR_INTRL_PAR |
                           EDMA_ERR_IORDY,

        EDMA_REQ_Q_BASE_HI_OFS = 0x10,
        EDMA_REQ_Q_IN_PTR_OFS = 0x14,   /* also contains BASE_LO */

        EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
        EDMA_REQ_Q_PTR_SHIFT = 5,

        EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
        EDMA_RSP_Q_IN_PTR_OFS = 0x20,
        EDMA_RSP_Q_OUT_PTR_OFS = 0x24,  /* also contains BASE_LO */
        EDMA_RSP_Q_PTR_SHIFT = 3,

        EDMA_CMD_OFS = 0x28,            /* EDMA command register */
        EDMA_EN = (1 << 0),             /* enable EDMA */
        EDMA_DS = (1 << 1),             /* disable EDMA; self-negated */
        ATA_RST = (1 << 2),             /* reset trans/link/phy */

        EDMA_IORDY_TMOUT = 0x34,
        EDMA_ARB_CFG = 0x38,

        /* Host private flags (hp_flags) */
        MV_HP_FLAG_MSI = (1 << 0),
        MV_HP_ERRATA_50XXB0 = (1 << 1),
        MV_HP_ERRATA_50XXB2 = (1 << 2),
        MV_HP_ERRATA_60X1B2 = (1 << 3),
        MV_HP_ERRATA_60X1C0 = (1 << 4),
        MV_HP_ERRATA_XX42A0 = (1 << 5),
        MV_HP_GEN_I = (1 << 6),         /* Generation I: 50xx */
        MV_HP_GEN_II = (1 << 7),        /* Generation II: 60xx */
        MV_HP_GEN_IIE = (1 << 8),       /* Generation IIE: 6042/7042 */
        MV_HP_PCIE = (1 << 9),          /* PCIe bus/regs: 7042 */

        /* Port private flags (pp_flags) */
        MV_PP_FLAG_EDMA_EN = (1 << 0),  /* is EDMA engine enabled? */
        MV_PP_FLAG_NCQ_EN = (1 << 1),   /* is EDMA set up for NCQ? */
        MV_PP_FLAG_HAD_A_RESET = (1 << 2),      /* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
        /* DMA boundary 0xffff is required by the s/g splitting
         * we need on /length/ in mv_fill_sg().
         */
        MV_DMA_BOUNDARY = 0xffffU,

        /* mask of register bits containing lower 32 bits
         * of EDMA request queue DMA address
         */
        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

        /* ditto, for response queue */
        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

enum chip_type {
        chip_504x,
        chip_508x,
        chip_5080,
        chip_604x,
        chip_608x,
        chip_6042,
        chip_7042,
        chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
        __le32 sg_addr;
        __le32 sg_addr_hi;
        __le16 ctrl_flags;
        __le16 ata_cmd[11];
};

struct mv_crqb_iie {
        __le32 addr;
        __le32 addr_hi;
        __le32 flags;
        __le32 len;
        __le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
        __le16 id;
        __le16 flags;
        __le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
        __le32 addr;
        __le32 flags_size;
        __le32 addr_hi;
        __le32 reserved;
};

struct mv_port_priv {
        struct mv_crqb *crqb;
        dma_addr_t crqb_dma;
        struct mv_crpb *crpb;
        dma_addr_t crpb_dma;
        struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
        dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];

        unsigned int req_idx;
        unsigned int resp_idx;

        u32 pp_flags;
};

struct mv_port_signal {
        u32 amps;
        u32 pre;
};

struct mv_host_priv {
        u32 hp_flags;
        struct mv_port_signal signal[8];
        const struct mv_hw_ops *ops;
        int n_ports;
        void __iomem *base;
        void __iomem *main_cause_reg_addr;
        void __iomem *main_mask_reg_addr;
        u32 irq_cause_ofs;
        u32 irq_mask_ofs;
        u32 unmask_all_irqs;
        /*
         * These consistent DMA memory pools give us guaranteed
         * alignment for hardware-accessed data structures,
         * and less memory waste in accomplishing the alignment.
         */
        struct dma_pool *crqb_pool;
        struct dma_pool *crpb_pool;
        struct dma_pool *sg_tbl_pool;
};

struct mv_hw_ops {
        void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
        void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
        int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
        void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
                               void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
                               void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
                           void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
                               void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
                        void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
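/* Worked example of that worst case: a 16 KB segment at dma address
 * 0x1f000 has offset 0xf000 within its 64K window, so mv_fill_sg()
 * must emit one ePRD of 0x1000 bytes (up to the 0x20000 boundary)
 * plus one of 0x3000 bytes -- i.e. up to two ePRDs per SG entry,
 * hence the divide by 2.
 */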
static struct scsi_host_template mv5_sht = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .can_queue = ATA_DEF_QUEUE,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = MV_MAX_SG_CT / 2,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = 1,
        .proc_name = DRV_NAME,
        .dma_boundary = MV_DMA_BOUNDARY,
        .slave_configure = ata_scsi_slave_config,
        .slave_destroy = ata_scsi_slave_destroy,
        .bios_param = ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .change_queue_depth = ata_scsi_change_queue_depth,
        .can_queue = MV_MAX_Q_DEPTH - 1,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = MV_MAX_SG_CT / 2,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = 1,
        .proc_name = DRV_NAME,
        .dma_boundary = MV_DMA_BOUNDARY,
        .slave_configure = ata_scsi_slave_config,
        .slave_destroy = ata_scsi_slave_destroy,
        .bios_param = ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .check_status = ata_check_status,
        .exec_command = ata_exec_command,
        .dev_select = ata_std_dev_select,

        .cable_detect = ata_cable_sata,

        .qc_prep = mv_qc_prep,
        .qc_issue = mv_qc_issue,
        .data_xfer = ata_data_xfer,

        .irq_clear = mv_irq_clear,
        .irq_on = ata_irq_on,

        .error_handler = mv_error_handler,
        .freeze = mv_eh_freeze,
        .thaw = mv_eh_thaw,

        .scr_read = mv5_scr_read,
        .scr_write = mv5_scr_write,

        .port_start = mv_port_start,
        .port_stop = mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
        .dev_config = mv6_dev_config,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .check_status = ata_check_status,
        .exec_command = ata_exec_command,
        .dev_select = ata_std_dev_select,

        .cable_detect = ata_cable_sata,

        .qc_prep = mv_qc_prep,
        .qc_issue = mv_qc_issue,
        .data_xfer = ata_data_xfer,

        .irq_clear = mv_irq_clear,
        .irq_on = ata_irq_on,

        .error_handler = mv_error_handler,
        .freeze = mv_eh_freeze,
        .thaw = mv_eh_thaw,
        .qc_defer = ata_std_qc_defer,

        .scr_read = mv_scr_read,
        .scr_write = mv_scr_write,

        .port_start = mv_port_start,
        .port_stop = mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .check_status = ata_check_status,
        .exec_command = ata_exec_command,
        .dev_select = ata_std_dev_select,

        .cable_detect = ata_cable_sata,

        .qc_prep = mv_qc_prep_iie,
        .qc_issue = mv_qc_issue,
        .data_xfer = ata_data_xfer,

        .irq_clear = mv_irq_clear,
        .irq_on = ata_irq_on,

        .error_handler = mv_error_handler,
        .freeze = mv_eh_freeze,
        .thaw = mv_eh_thaw,
        .qc_defer = ata_std_qc_defer,

        .scr_read = mv_scr_read,
        .scr_write = mv_scr_write,

        .port_start = mv_port_start,
        .port_stop = mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
        {  /* chip_504x */
                .flags = MV_COMMON_FLAGS,
                .pio_mask = 0x1f,       /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv5_ops,
        },
        {  /* chip_508x */
                .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask = 0x1f,       /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv5_ops,
        },
        {  /* chip_5080 */
                .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask = 0x1f,       /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv5_ops,
        },
        {  /* chip_604x */
                .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                         ATA_FLAG_NCQ,
                .pio_mask = 0x1f,       /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv6_ops,
        },
        {  /* chip_608x */
                .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                         ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
                .pio_mask = 0x1f,       /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv6_ops,
        },
        {  /* chip_6042 */
                .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                         ATA_FLAG_NCQ,
                .pio_mask = 0x1f,       /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv_iie_ops,
        },
        {  /* chip_7042 */
                .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                         ATA_FLAG_NCQ,
                .pio_mask = 0x1f,       /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv_iie_ops,
        },
        {  /* chip_soc */
                .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
                .pio_mask = 0x1f,       /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv_iie_ops,
        },
};

static const struct pci_device_id mv_pci_tbl[] = {
        { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
        { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
        /* RocketRAID 1740/174x have different identifiers */
        { PCI_VDEVICE(TTI, 0x1740), chip_508x },
        { PCI_VDEVICE(TTI, 0x1742), chip_508x },

        { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
        { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
        { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

        { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

        /* Adaptec 1430SA */
        { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

        /* Marvell 7042 support */
        { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

        /* Highpoint RocketRAID PCIe series */
        { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
        { PCI_VDEVICE(TTI, 0x2310), chip_7042 },

        { }                     /* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
        .phy_errata = mv5_phy_errata,
        .enable_leds = mv5_enable_leds,
        .read_preamp = mv5_read_preamp,
        .reset_hc = mv5_reset_hc,
        .reset_flash = mv5_reset_flash,
        .reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
        .phy_errata = mv6_phy_errata,
        .enable_leds = mv6_enable_leds,
        .read_preamp = mv6_read_preamp,
        .reset_hc = mv6_reset_hc,
        .reset_flash = mv6_reset_flash,
        .reset_bus = mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
        .phy_errata = mv6_phy_errata,
        .enable_leds = mv_soc_enable_leds,
        .read_preamp = mv_soc_read_preamp,
        .reset_hc = mv_soc_reset_hc,
        .reset_flash = mv_soc_reset_flash,
        .reset_bus = mv_soc_reset_bus,
};

/*
 * Functions
 */

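/*
 * writelfl() (writel + flush) performs a dummy read-back so that the
 * MMIO write is pushed past any PCI write-posting buffers before we
 * continue; use it whenever the next step depends on the device
 * having actually seen the write.
 */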
static inline void writelfl(unsigned long data, void __iomem *addr)
{
        writel(data, addr);
        (void) readl(addr);     /* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
        return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
        return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
        return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
                                                 unsigned int port)
{
        return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
        return mv_hc_base_from_port(base, port) +
                MV_SATAHC_ARBTR_REG_SZ +
                (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
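/*
 * Worked example of the port address math above: port 5 sits on HC 1,
 * hard port 1, so mv_port_base(base, 5) = base + 0x20000 (SATAHC0)
 * + 0x10000 (one HC) + 0x2000 (arbiter) + 0x2000 (one port)
 * = base + 0x34000.
 */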

static inline void __iomem *mv_host_base(struct ata_host *host)
{
        struct mv_host_priv *hpriv = host->private_data;
        return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
        return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
        return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

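/*
 * A note on the "(x >> 16) >> 16" idiom used below: dma_addr_t may be
 * only 32 bits wide, in which case a single ">> 32" would be undefined
 * behaviour in C.  Shifting twice by 16 safely yields the high dword
 * (zero on 32-bit configurations).
 */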
static void mv_set_edma_ptrs(void __iomem *port_mmio,
                             struct mv_host_priv *hpriv,
                             struct mv_port_priv *pp)
{
        u32 index;

        /*
         * initialize request queue
         */
        index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

        WARN_ON(pp->crqb_dma & 0x3ff);
        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl((pp->crqb_dma & 0xffffffff) | index,
                         port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
        else
                writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

        /*
         * initialize response queue
         */
        index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

        WARN_ON(pp->crpb_dma & 0xff);
        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl((pp->crpb_dma & 0xffffffff) | index,
                         port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
        else
                writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

        writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command being started
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
                         struct mv_port_priv *pp, u8 protocol)
{
        int want_ncq = (protocol == ATA_PROT_NCQ);

        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
                if (want_ncq != using_ncq)
                        __mv_stop_dma(ap);
        }
        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
                struct mv_host_priv *hpriv = ap->host->private_data;
                int hard_port = mv_hardport_from_port(ap->port_no);
                void __iomem *hc_mmio = mv_hc_base_from_port(
                                ap->host->iomap[MV_PRIMARY_BAR], hard_port);
                u32 hc_irq_cause, ipending;

                /* clear EDMA event indicators, if any */
                writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

                /* clear EDMA interrupt indicator, if any */
                hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
                ipending = (DEV_IRQ << hard_port) |
                           (CRPB_DMA_DONE << hard_port);
                if (hc_irq_cause & ipending) {
                        writelfl(hc_irq_cause & ~ipending,
                                 hc_mmio + HC_IRQ_CAUSE_OFS);
                }

                mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

                /* clear FIS IRQ Cause */
                writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

                mv_set_edma_ptrs(port_mmio, hpriv, pp);

                writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
        }
        WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        u32 reg;
        int i, err = 0;

        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                /* Disable EDMA if active.  The disable bit auto clears.
                 */
                writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
                pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        } else {
                WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
        }

        /* now properly wait for the eDMA to stop */
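        /* poll budget: 1000 iterations * 100 us = roughly 100 ms total */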
        for (i = 1000; i > 0; i--) {
                reg = readl(port_mmio + EDMA_CMD_OFS);
                if (!(reg & EDMA_EN))
                        break;

                udelay(100);
        }

        if (reg & EDMA_EN) {
                ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
                err = -EIO;
        }

        return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&ap->host->lock, flags);
        rc = __mv_stop_dma(ap);
        spin_unlock_irqrestore(&ap->host->lock, flags);

        return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
        int b, w;
        for (b = 0; b < bytes; ) {
                DPRINTK("%p: ", start + b);
                for (w = 0; b < bytes && w < 4; w++) {
                        printk("%08x ", readl(start + b));
                        b += sizeof(u32);
                }
                printk("\n");
        }
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
        int b, w;
        u32 dw;
        for (b = 0; b < bytes; ) {
                DPRINTK("%02x: ", b);
                for (w = 0; b < bytes && w < 4; w++) {
                        (void) pci_read_config_dword(pdev, b, &dw);
                        printk("%08x ", dw);
                        b += sizeof(u32);
                }
                printk("\n");
        }
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
        void __iomem *hc_base = mv_hc_base(mmio_base,
                                           port >> MV_PORT_HC_SHIFT);
        void __iomem *port_base;
        int start_port, num_ports, p, start_hc, num_hcs, hc;

        if (0 > port) {
                start_hc = start_port = 0;
                num_ports = 8;          /* should be benign for 4 port devs */
                num_hcs = 2;
        } else {
                start_hc = port >> MV_PORT_HC_SHIFT;
                start_port = port;
                num_ports = num_hcs = 1;
        }
        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
                num_ports > 1 ? num_ports - 1 : start_port);

        if (NULL != pdev) {
                DPRINTK("PCI config space regs:\n");
                mv_dump_pci_cfg(pdev, 0x68);
        }
        DPRINTK("PCI regs:\n");
        mv_dump_mem(mmio_base+0xc00, 0x3c);
        mv_dump_mem(mmio_base+0xd00, 0x34);
        mv_dump_mem(mmio_base+0xf00, 0x4);
        mv_dump_mem(mmio_base+0x1d00, 0x6c);
        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
                hc_base = mv_hc_base(mmio_base, hc);
                DPRINTK("HC regs (HC %i):\n", hc);
                mv_dump_mem(hc_base, 0x1c);
        }
        for (p = start_port; p < start_port + num_ports; p++) {
                port_base = mv_port_base(mmio_base, p);
                DPRINTK("EDMA regs (port %i):\n", p);
                mv_dump_mem(port_base, 0x54);
                DPRINTK("SATA regs (port %i):\n", p);
                mv_dump_mem(port_base+0x300, 0x60);
        }
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
        unsigned int ofs;

        switch (sc_reg_in) {
        case SCR_STATUS:
        case SCR_CONTROL:
        case SCR_ERROR:
                ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
                break;
        case SCR_ACTIVE:
                ofs = SATA_ACTIVE_OFS;  /* active is not with the others */
                break;
        default:
                ofs = 0xffffffffU;
                break;
        }
        return ofs;
}
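/*
 * With libata's SCR numbering (SCR_STATUS = 0, SCR_ERROR = 1,
 * SCR_CONTROL = 2), the arithmetic above maps SStatus to 0x300,
 * SError to 0x304 and SControl to 0x308 within the port registers.
 */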

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                *val = readl(mv_ap_base(ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                writelfl(val, mv_ap_base(ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
        /*
         * We don't have hob_nsect when doing NCQ commands on Gen-II.
         * See mv_qc_prep() for more info.
         */
        if (adev->flags & ATA_DFLAG_NCQ)
                if (adev->max_sectors > ATA_MAX_SECTORS)
                        adev->max_sectors = ATA_MAX_SECTORS;
}

static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
                        void __iomem *port_mmio, int want_ncq)
{
        u32 cfg;

        /* set up non-NCQ EDMA configuration */
        cfg = EDMA_CFG_Q_DEPTH;         /* always 0x1f for *all* chips */

        if (IS_GEN_I(hpriv))
                cfg |= (1 << 8);        /* enab config burst size mask */

        else if (IS_GEN_II(hpriv))
                cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

        else if (IS_GEN_IIE(hpriv)) {
                cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
                cfg |= (1 << 22);       /* enab 4-entry host queue cache */
                cfg |= (1 << 18);       /* enab early completion */
                cfg |= (1 << 17);       /* enab cut-through (dis stor&forwrd) */
        }

        if (want_ncq) {
                cfg |= EDMA_CFG_NCQ;
                pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
        } else
                pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

        writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
        struct mv_host_priv *hpriv = ap->host->private_data;
        struct mv_port_priv *pp = ap->private_data;
        int tag;

        if (pp->crqb) {
                dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
                pp->crqb = NULL;
        }
        if (pp->crpb) {
                dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
                pp->crpb = NULL;
        }
        /*
         * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
         * For later hardware, we have one unique sg_tbl per NCQ tag.
         */
        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
                if (pp->sg_tbl[tag]) {
                        if (tag == 0 || !IS_GEN_I(hpriv))
                                dma_pool_free(hpriv->sg_tbl_pool,
                                              pp->sg_tbl[tag],
                                              pp->sg_tbl_dma[tag]);
                        pp->sg_tbl[tag] = NULL;
                }
        }
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct mv_host_priv *hpriv = ap->host->private_data;
        struct mv_port_priv *pp;
        void __iomem *port_mmio = mv_ap_base(ap);
        unsigned long flags;
        int tag, rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
        ap->private_data = pp;

        rc = ata_pad_alloc(ap, dev);
        if (rc)
                return rc;

        pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
        if (!pp->crqb)
                return -ENOMEM;
        memset(pp->crqb, 0, MV_CRQB_Q_SZ);

        pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
        if (!pp->crpb)
                goto out_port_free_dma_mem;
        memset(pp->crpb, 0, MV_CRPB_Q_SZ);

        /*
         * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
         * For later hardware, we need one unique sg_tbl per NCQ tag.
         */
        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
                if (tag == 0 || !IS_GEN_I(hpriv)) {
                        pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
                                        GFP_KERNEL, &pp->sg_tbl_dma[tag]);
                        if (!pp->sg_tbl[tag])
                                goto out_port_free_dma_mem;
                } else {
                        pp->sg_tbl[tag] = pp->sg_tbl[0];
                        pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
                }
        }

        spin_lock_irqsave(&ap->host->lock, flags);

        mv_edma_cfg(pp, hpriv, port_mmio, 0);
        mv_set_edma_ptrs(port_mmio, hpriv, pp);

        spin_unlock_irqrestore(&ap->host->lock, flags);

        /* Don't turn on EDMA here...do it before DMA commands only.  Else
         * we'll be unable to send non-data, PIO, etc due to restricted access
         * to shadow regs.
         */
        return 0;

out_port_free_dma_mem:
        mv_port_free_dma_mem(ap);
        return -ENOMEM;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
        mv_stop_dma(ap);
        mv_port_free_dma_mem(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
        struct mv_port_priv *pp = qc->ap->private_data;
        struct scatterlist *sg;
        struct mv_sg *mv_sg, *last_sg = NULL;
        unsigned int si;

        mv_sg = pp->sg_tbl[qc->tag];
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                dma_addr_t addr = sg_dma_address(sg);
                u32 sg_len = sg_dma_len(sg);

                while (sg_len) {
                        u32 offset = addr & 0xffff;
                        u32 len = sg_len;

                        if (offset + sg_len > 0x10000)
                                len = 0x10000 - offset;

                        mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
                        mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
                        mv_sg->flags_size = cpu_to_le32(len & 0xffff);

                        sg_len -= len;
                        addr += len;

                        last_sg = mv_sg;
                        mv_sg++;
                }
        }

        if (likely(last_sg))
                last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
        u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
                  (last ? CRQB_CMD_LAST : 0);
        *cmdw = cpu_to_le16(tmp);
}
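/*
 * Each CRQB word built by mv_crqb_pack_cmd() thus carries the register
 * value in bits 7:0, the device register address starting at bit 8
 * (CRQB_CMD_ADDR_SHIFT), the CRQB_CMD_CS control bits, and
 * CRQB_CMD_LAST (bit 15) set only on the final word of the sequence.
 */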

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        __le16 *cw;
        struct ata_taskfile *tf;
        u16 flags = 0;
        unsigned in_index;

        if ((qc->tf.protocol != ATA_PROT_DMA) &&
            (qc->tf.protocol != ATA_PROT_NCQ))
                return;

        /* Fill in command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
                flags |= CRQB_FLAG_READ;
        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;

        /* get current queue index from software */
        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        pp->crqb[in_index].sg_addr =
                cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
        pp->crqb[in_index].sg_addr_hi =
                cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
        pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

        cw = &pp->crqb[in_index].ata_cmd[0];
        tf = &qc->tf;

        /* Sadly, the CRQB cannot accommodate all registers--there are
         * only 11 words...so we must pick and choose required
         * registers based on the command.  So, we drop feature and
         * hob_feature for [RW] DMA commands, but they are needed for
         * NCQ.  NCQ will drop hob_nsect.
         */
        switch (tf->command) {
        case ATA_CMD_READ:
        case ATA_CMD_READ_EXT:
        case ATA_CMD_WRITE:
        case ATA_CMD_WRITE_EXT:
        case ATA_CMD_WRITE_FUA_EXT:
                mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
                break;
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_FPDMA_WRITE:
                mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
                mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
                break;
        default:
                /* The only other commands EDMA supports in non-queued and
                 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
                 * of which are defined/used by Linux.  If we get here, this
                 * driver needs work.
                 *
                 * FIXME: modify libata to give qc_prep a return value and
                 * return error here.
                 */
                BUG_ON(tf->command);
                break;
        }
        mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
        mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);    /* last */

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;
        mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        struct mv_crqb_iie *crqb;
        struct ata_taskfile *tf;
        unsigned in_index;
        u32 flags = 0;

        if ((qc->tf.protocol != ATA_PROT_DMA) &&
            (qc->tf.protocol != ATA_PROT_NCQ))
                return;

        /* Fill in Gen IIE command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
                flags |= CRQB_FLAG_READ;

        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;
        flags |= qc->tag << CRQB_HOSTQ_SHIFT;

        /* get current queue index from software */
        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
        crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
        crqb->flags = cpu_to_le32(flags);

        tf = &qc->tf;
        crqb->ata_cmd[0] = cpu_to_le32(
                        (tf->command << 16) |
                        (tf->feature << 24)
                );
        crqb->ata_cmd[1] = cpu_to_le32(
                        (tf->lbal << 0) |
                        (tf->lbam << 8) |
                        (tf->lbah << 16) |
                        (tf->device << 24)
                );
        crqb->ata_cmd[2] = cpu_to_le32(
                        (tf->hob_lbal << 0) |
                        (tf->hob_lbam << 8) |
                        (tf->hob_lbah << 16) |
                        (tf->hob_feature << 24)
                );
        crqb->ata_cmd[3] = cpu_to_le32(
                        (tf->nsect << 0) |
                        (tf->hob_nsect << 8)
                );

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;
        mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        u32 in_index;

        if ((qc->tf.protocol != ATA_PROT_DMA) &&
            (qc->tf.protocol != ATA_PROT_NCQ)) {
                /* We're about to send a non-EDMA capable command to the
                 * port.  Turn off EDMA so there won't be problems accessing
                 * shadow block, etc registers.
                 */
                __mv_stop_dma(ap);
                return ata_qc_issue_prot(qc);
        }

        mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

        pp->req_idx++;

        in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

        /* and write the request in pointer to kick the EDMA to life */
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        return 0;
}
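/*
 * Note that EDMA_REQ_Q_PTR_SHIFT (5) matches the 32-byte CRQB size, so
 * the masked index written above doubles as the byte offset of the
 * next free slot within the 1 KB request ring.
 */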

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected queued command, or NULL if none is active
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
1497 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1498 {
1499 void __iomem *port_mmio = mv_ap_base(ap);
1500 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1501 struct mv_port_priv *pp = ap->private_data;
1502 struct mv_host_priv *hpriv = ap->host->private_data;
1503 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1504 unsigned int action = 0, err_mask = 0;
1505 struct ata_eh_info *ehi = &ap->link.eh_info;
1506
1507 ata_ehi_clear_desc(ehi);
1508
1509 if (!edma_enabled) {
1510 /* just a guess: do we need to do this? should we
1511 * expand this, and do it in all cases?
1512 */
1513 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1514 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1515 }
1516
1517 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1518
1519 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1520
1521 /*
1522 * all generations share these EDMA error cause bits
1523 */
1524
1525 if (edma_err_cause & EDMA_ERR_DEV)
1526 err_mask |= AC_ERR_DEV;
1527 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1528 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1529 EDMA_ERR_INTRL_PAR)) {
1530 err_mask |= AC_ERR_ATA_BUS;
1531 action |= ATA_EH_HARDRESET;
1532 ata_ehi_push_desc(ehi, "parity error");
1533 }
1534 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1535 ata_ehi_hotplugged(ehi);
1536 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1537 "dev disconnect" : "dev connect");
1538 action |= ATA_EH_HARDRESET;
1539 }
1540
1541 if (IS_GEN_I(hpriv)) {
1542 eh_freeze_mask = EDMA_EH_FREEZE_5;
1543
1544 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1545 pp = ap->private_data;
1546 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1547 ata_ehi_push_desc(ehi, "EDMA self-disable");
1548 }
1549 } else {
1550 eh_freeze_mask = EDMA_EH_FREEZE;
1551
1552 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1553 pp = ap->private_data;
1554 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1555 ata_ehi_push_desc(ehi, "EDMA self-disable");
1556 }
1557
1558 if (edma_err_cause & EDMA_ERR_SERR) {
1559 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1560 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1561 err_mask = AC_ERR_ATA_BUS;
1562 action |= ATA_EH_HARDRESET;
1563 }
1564 }
1565
1566 /* Clear EDMA now that SERR cleanup done */
1567 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1568
1569 if (!err_mask) {
1570 err_mask = AC_ERR_OTHER;
1571 action |= ATA_EH_HARDRESET;
1572 }
1573
1574 ehi->serror |= serr;
1575 ehi->action |= action;
1576
1577 if (qc)
1578 qc->err_mask |= err_mask;
1579 else
1580 ehi->err_mask |= err_mask;
1581
1582 if (edma_err_cause & eh_freeze_mask)
1583 ata_port_freeze(ap);
1584 else
1585 ata_port_abort(ap);
1586 }
1587
1588 static void mv_intr_pio(struct ata_port *ap)
1589 {
1590 struct ata_queued_cmd *qc;
1591 u8 ata_status;
1592
1593 /* ignore spurious intr if drive still BUSY */
1594 ata_status = readb(ap->ioaddr.status_addr);
1595 if (unlikely(ata_status & ATA_BUSY))
1596 return;
1597
1598 /* get active ATA command */
1599 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1600 if (unlikely(!qc)) /* no active tag */
1601 return;
1602 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1603 return;
1604
1605 /* and finally, complete the ATA command */
1606 qc->err_mask |= ac_err_mask(ata_status);
1607 ata_qc_complete(qc);
1608 }
1609
1610 static void mv_intr_edma(struct ata_port *ap)
1611 {
1612 void __iomem *port_mmio = mv_ap_base(ap);
1613 struct mv_host_priv *hpriv = ap->host->private_data;
1614 struct mv_port_priv *pp = ap->private_data;
1615 struct ata_queued_cmd *qc;
1616 u32 out_index, in_index;
1617 bool work_done = false;
1618
1619 /* get h/w response queue pointer */
1620 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1621 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1622
1623 while (1) {
1624 u16 status;
1625 unsigned int tag;
1626
1627 /* get s/w response queue last-read pointer, and compare */
1628 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1629 if (in_index == out_index)
1630 break;
1631
1632 /* 50xx: get active ATA command */
1633 if (IS_GEN_I(hpriv))
1634 tag = ap->link.active_tag;
1635
1636 /* Gen II/IIE: get active ATA command via tag, to enable
1637 * support for queueing. this works transparently for
1638 * queued and non-queued modes.
1639 */
1640 else
1641 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1642
1643 qc = ata_qc_from_tag(ap, tag);
1644
1645 /* For non-NCQ mode, the lower 8 bits of status
1646 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1647 * which should be zero if all went well.
1648 */
1649 status = le16_to_cpu(pp->crpb[out_index].flags);
1650 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1651 mv_err_intr(ap, qc);
1652 return;
1653 }
1654
1655 /* and finally, complete the ATA command */
1656 if (qc) {
1657 qc->err_mask |=
1658 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1659 ata_qc_complete(qc);
1660 }
1661
1662 /* advance software response queue pointer, to
1663 * indicate (after the loop completes) to hardware
1664 * that we have consumed a response queue entry.
1665 */
1666 work_done = true;
1667 pp->resp_idx++;
1668 }
1669
1670 if (work_done)
1671 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1672 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1673 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1674 }
1675
1676 /**
1677 * mv_host_intr - Handle all interrupts on the given host controller
1678 * @host: host specific structure
1679 * @relevant: port error bits relevant to this host controller
1680 * @hc: which host controller we're to look at
1681 *
1682 * Read then write clear the HC interrupt status then walk each
1683 * port connected to the HC and see if it needs servicing. Port
1684 * success ints are reported in the HC interrupt status reg, the
1685 * port error ints are reported in the higher level main
1686 * interrupt status register and thus are passed in via the
1687 * 'relevant' argument.
1688 *
1689 * LOCKING:
1690 * Inherited from caller.
1691 */
1692 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1693 {
1694 struct mv_host_priv *hpriv = host->private_data;
1695 void __iomem *mmio = hpriv->base;
1696 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1697 u32 hc_irq_cause;
1698 int port, port0, last_port;
1699
1700 if (hc == 0)
1701 port0 = 0;
1702 else
1703 port0 = MV_PORTS_PER_HC;
1704
1705 if (HAS_PCI(host))
1706 last_port = port0 + MV_PORTS_PER_HC;
1707 else
1708 last_port = port0 + hpriv->n_ports;
1709 /* we'll need the HC success int register in most cases */
1710 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1711 if (!hc_irq_cause)
1712 return;
1713
1714 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1715
1716 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1717 hc, relevant, hc_irq_cause);
1718
1719 for (port = port0; port < last_port; port++) {
1720 struct ata_port *ap = host->ports[port];
1721 struct mv_port_priv *pp;
1722 int have_err_bits, hard_port, shift;
1723
1724 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1725 continue;
1726
1727 pp = ap->private_data;
1728
1729 shift = port << 1; /* (port * 2) */
1730 if (port >= MV_PORTS_PER_HC) {
1731 shift++; /* skip bit 8 in the HC Main IRQ reg */
1732 }
1733 have_err_bits = ((PORT0_ERR << shift) & relevant);
1734
1735 if (unlikely(have_err_bits)) {
1736 struct ata_queued_cmd *qc;
1737
1738 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1739 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1740 continue;
1741
1742 mv_err_intr(ap, qc);
1743 continue;
1744 }
1745
1746 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1747
1748 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1749 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1750 mv_intr_edma(ap);
1751 } else {
1752 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1753 mv_intr_pio(ap);
1754 }
1755 }
1756 VPRINTK("EXIT\n");
1757 }
1758
1759 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1760 {
1761 struct mv_host_priv *hpriv = host->private_data;
1762 struct ata_port *ap;
1763 struct ata_queued_cmd *qc;
1764 struct ata_eh_info *ehi;
1765 unsigned int i, err_mask, printed = 0;
1766 u32 err_cause;
1767
1768 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1769
1770 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1771 err_cause);
1772
1773 DPRINTK("All regs @ PCI error\n");
1774 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1775
1776 writelfl(0, mmio + hpriv->irq_cause_ofs);
1777
1778 for (i = 0; i < host->n_ports; i++) {
1779 ap = host->ports[i];
1780 if (!ata_link_offline(&ap->link)) {
1781 ehi = &ap->link.eh_info;
1782 ata_ehi_clear_desc(ehi);
1783 if (!printed++)
1784 ata_ehi_push_desc(ehi,
1785 "PCI err cause 0x%08x", err_cause);
1786 err_mask = AC_ERR_HOST_BUS;
1787 ehi->action = ATA_EH_HARDRESET;
1788 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1789 if (qc)
1790 qc->err_mask |= err_mask;
1791 else
1792 ehi->err_mask |= err_mask;
1793
1794 ata_port_freeze(ap);
1795 }
1796 }
1797 }
1798
1799 /**
1800 * mv_interrupt - Main interrupt event handler
1801 * @irq: unused
1802 * @dev_instance: private data; in this case the host structure
1803 *
1804 * Read the read only register to determine if any host
1805 * controllers have pending interrupts. If so, call lower level
1806 * routine to handle. Also check for PCI errors which are only
1807 * reported here.
1808 *
1809 * LOCKING:
1810 * This routine holds the host lock while processing pending
1811 * interrupts.
1812 */
1813 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1814 {
1815 struct ata_host *host = dev_instance;
1816 struct mv_host_priv *hpriv = host->private_data;
1817 unsigned int hc, handled = 0, n_hcs;
1818 void __iomem *mmio = hpriv->base;
1819 u32 irq_stat, irq_mask;
1820
1821 spin_lock(&host->lock);
1822
1823 irq_stat = readl(hpriv->main_cause_reg_addr);
1824 irq_mask = readl(hpriv->main_mask_reg_addr);
1825
1826 /* check the cases where we either have nothing pending or have read
1827 * a bogus register value which can indicate HW removal or PCI fault
1828 */
1829 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1830 goto out_unlock;
1831
1832 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1833
1834 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1835 mv_pci_error(host, mmio);
1836 handled = 1;
1837 goto out_unlock; /* skip all other HC irq handling */
1838 }
1839
1840 for (hc = 0; hc < n_hcs; hc++) {
1841 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1842 if (relevant) {
1843 mv_host_intr(host, relevant, hc);
1844 handled = 1;
1845 }
1846 }
1847
1848 out_unlock:
1849 spin_unlock(&host->lock);
1850
1851 return IRQ_RETVAL(handled);
1852 }
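
/*
 * Editor's aside (illustrative sketch, not part of the driver): the
 * essential shape of mv_interrupt() above.  Reading back 0xffffffff from
 * a PCI device classically means the device has been removed or the bus
 * has faulted, hence the explicit all-ones guard before any dispatch.
 */
#if 0	/* example only, never compiled */
static irqreturn_t example_isr_skeleton(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	u32 cause, mask;

	spin_lock(&host->lock);
	cause = readl(hpriv->main_cause_reg_addr);
	mask  = readl(hpriv->main_mask_reg_addr);
	if ((cause & mask) && cause != 0xffffffffU) {
		/* ... PCI-error check, then per-HC dispatch ... */
		handled = 1;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
#endif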
1853
1854 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1855 {
1856 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1857 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1858
1859 return hc_mmio + ofs;
1860 }
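
/*
 * Editor's note: a worked example of the mapping above -- port 5 is
 * hardport 1 within its HC, so its phy block lives at
 * mv_hc_base_from_port(mmio, 5) + (1 + 1) * 0x100, i.e. HC base + 0x200.
 */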
1861
1862 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1863 {
1864 unsigned int ofs;
1865
1866 switch (sc_reg_in) {
1867 case SCR_STATUS:
1868 case SCR_ERROR:
1869 case SCR_CONTROL:
1870 ofs = sc_reg_in * sizeof(u32);
1871 break;
1872 default:
1873 ofs = 0xffffffffU;
1874 break;
1875 }
1876 return ofs;
1877 }
1878
1879 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1880 {
1881 struct mv_host_priv *hpriv = ap->host->private_data;
1882 void __iomem *mmio = hpriv->base;
1883 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1884 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1885
1886 if (ofs != 0xffffffffU) {
1887 *val = readl(addr + ofs);
1888 return 0;
1889 } else
1890 return -EINVAL;
1891 }
1892
1893 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1894 {
1895 struct mv_host_priv *hpriv = ap->host->private_data;
1896 void __iomem *mmio = hpriv->base;
1897 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1898 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1899
1900 if (ofs != 0xffffffffU) {
1901 writelfl(val, addr + ofs);
1902 return 0;
1903 } else
1904 return -EINVAL;
1905 }
1906
1907 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1908 {
1909 struct pci_dev *pdev = to_pci_dev(host->dev);
1910 int early_5080;
1911
1912 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1913
1914 if (!early_5080) {
1915 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1916 tmp |= (1 << 0);
1917 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1918 }
1919
1920 mv_reset_pci_bus(host, mmio);
1921 }
1922
1923 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1924 {
1925 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1926 }
1927
1928 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1929 void __iomem *mmio)
1930 {
1931 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1932 u32 tmp;
1933
1934 tmp = readl(phy_mmio + MV5_PHY_MODE);
1935
1936 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1937 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1938 }
1939
1940 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1941 {
1942 u32 tmp;
1943
1944 writel(0, mmio + MV_GPIO_PORT_CTL);
1945
1946 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1947
1948 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1949 	tmp |= ~(1 << 0); /* XXX: sets bits 31:1; was "&= ~(1 << 0)" intended? */
1950 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1951 }
1952
1953 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1954 unsigned int port)
1955 {
1956 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1957 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1958 u32 tmp;
1959 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1960
1961 if (fix_apm_sq) {
1962 tmp = readl(phy_mmio + MV5_LT_MODE);
1963 tmp |= (1 << 19);
1964 writel(tmp, phy_mmio + MV5_LT_MODE);
1965
1966 tmp = readl(phy_mmio + MV5_PHY_CTL);
1967 tmp &= ~0x3;
1968 tmp |= 0x1;
1969 writel(tmp, phy_mmio + MV5_PHY_CTL);
1970 }
1971
1972 tmp = readl(phy_mmio + MV5_PHY_MODE);
1973 tmp &= ~mask;
1974 tmp |= hpriv->signal[port].pre;
1975 tmp |= hpriv->signal[port].amps;
1976 writel(tmp, phy_mmio + MV5_PHY_MODE);
1977 }
1978
1979
1980 #undef ZERO
1981 #define ZERO(reg) writel(0, port_mmio + (reg))
1982 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1983 unsigned int port)
1984 {
1985 void __iomem *port_mmio = mv_port_base(mmio, port);
1986
1987 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1988
1989 mv_channel_reset(hpriv, mmio, port);
1990
1991 ZERO(0x028); /* command */
1992 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1993 ZERO(0x004); /* timer */
1994 ZERO(0x008); /* irq err cause */
1995 ZERO(0x00c); /* irq err mask */
1996 ZERO(0x010); /* rq bah */
1997 ZERO(0x014); /* rq inp */
1998 ZERO(0x018); /* rq outp */
1999 ZERO(0x01c); /* respq bah */
2000 ZERO(0x024); /* respq outp */
2001 ZERO(0x020); /* respq inp */
2002 ZERO(0x02c); /* test control */
2003 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2004 }
2005 #undef ZERO
2006
2007 #define ZERO(reg) writel(0, hc_mmio + (reg))
2008 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2009 unsigned int hc)
2010 {
2011 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2012 u32 tmp;
2013
2014 ZERO(0x00c);
2015 ZERO(0x010);
2016 ZERO(0x014);
2017 ZERO(0x018);
2018
2019 tmp = readl(hc_mmio + 0x20);
2020 tmp &= 0x1c1c1c1c;
2021 tmp |= 0x03030303;
2022 writel(tmp, hc_mmio + 0x20);
2023 }
2024 #undef ZERO
2025
2026 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2027 unsigned int n_hc)
2028 {
2029 unsigned int hc, port;
2030
2031 for (hc = 0; hc < n_hc; hc++) {
2032 for (port = 0; port < MV_PORTS_PER_HC; port++)
2033 mv5_reset_hc_port(hpriv, mmio,
2034 (hc * MV_PORTS_PER_HC) + port);
2035
2036 mv5_reset_one_hc(hpriv, mmio, hc);
2037 }
2038
2039 return 0;
2040 }
2041
2042 #undef ZERO
2043 #define ZERO(reg) writel(0, mmio + (reg))
2044 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2045 {
2046 struct mv_host_priv *hpriv = host->private_data;
2047 u32 tmp;
2048
2049 tmp = readl(mmio + MV_PCI_MODE);
2050 tmp &= 0xff00ffff;
2051 writel(tmp, mmio + MV_PCI_MODE);
2052
2053 ZERO(MV_PCI_DISC_TIMER);
2054 ZERO(MV_PCI_MSI_TRIGGER);
2055 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2056 ZERO(HC_MAIN_IRQ_MASK_OFS);
2057 ZERO(MV_PCI_SERR_MASK);
2058 ZERO(hpriv->irq_cause_ofs);
2059 ZERO(hpriv->irq_mask_ofs);
2060 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2061 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2062 ZERO(MV_PCI_ERR_ATTRIBUTE);
2063 ZERO(MV_PCI_ERR_COMMAND);
2064 }
2065 #undef ZERO
2066
2067 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2068 {
2069 u32 tmp;
2070
2071 mv5_reset_flash(hpriv, mmio);
2072
2073 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2074 tmp &= 0x3;
2075 tmp |= (1 << 5) | (1 << 6);
2076 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2077 }
2078
2079 /**
2080 * mv6_reset_hc - Perform the 6xxx global soft reset
2081 * @mmio: base address of the HBA
2082 *
2083 * This routine only applies to 6xxx parts.
2084 *
2085 * LOCKING:
2086 * Inherited from caller.
2087 */
2088 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2089 unsigned int n_hc)
2090 {
2091 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2092 int i, rc = 0;
2093 u32 t;
2094
2095 	/* Follow the procedure defined in the PCI "main command and status
2096 	 * register" table.
2097 */
2098 t = readl(reg);
2099 writel(t | STOP_PCI_MASTER, reg);
2100
2101 for (i = 0; i < 1000; i++) {
2102 udelay(1);
2103 t = readl(reg);
2104 if (PCI_MASTER_EMPTY & t)
2105 break;
2106 }
2107 if (!(PCI_MASTER_EMPTY & t)) {
2108 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2109 rc = 1;
2110 goto done;
2111 }
2112
2113 /* set reset */
2114 i = 5;
2115 do {
2116 writel(t | GLOB_SFT_RST, reg);
2117 t = readl(reg);
2118 udelay(1);
2119 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2120
2121 if (!(GLOB_SFT_RST & t)) {
2122 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2123 rc = 1;
2124 goto done;
2125 }
2126
2127 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2128 i = 5;
2129 do {
2130 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2131 t = readl(reg);
2132 udelay(1);
2133 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2134
2135 if (GLOB_SFT_RST & t) {
2136 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2137 rc = 1;
2138 }
2139 done:
2140 return rc;
2141 }
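
/*
 * Editor's aside (illustrative sketch, not part of the driver): the three
 * waits in mv6_reset_hc() above are instances of one bounded-poll idiom.
 * A hypothetical helper capturing it -- spin until the selected bits reach
 * the wanted state, or the retry budget runs out:
 */
#if 0	/* example only, never compiled */
static int example_poll_reg(void __iomem *reg, u32 bits, int want_set,
			    int retries)
{
	while (retries-- > 0) {
		u32 val = readl(reg);

		if (!!(val & bits) == !!want_set)
			return 0;	/* reached the desired state */
		udelay(1);
	}
	return -ETIMEDOUT;
}
#endif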
2142
2143 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2144 void __iomem *mmio)
2145 {
2146 void __iomem *port_mmio;
2147 u32 tmp;
2148
2149 tmp = readl(mmio + MV_RESET_CFG);
2150 if ((tmp & (1 << 0)) == 0) {
2151 hpriv->signal[idx].amps = 0x7 << 8;
2152 hpriv->signal[idx].pre = 0x1 << 5;
2153 return;
2154 }
2155
2156 port_mmio = mv_port_base(mmio, idx);
2157 tmp = readl(port_mmio + PHY_MODE2);
2158
2159 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2160 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2161 }
2162
2163 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2164 {
2165 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2166 }
2167
2168 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2169 unsigned int port)
2170 {
2171 void __iomem *port_mmio = mv_port_base(mmio, port);
2172
2173 u32 hp_flags = hpriv->hp_flags;
2174 int fix_phy_mode2 =
2175 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2176 int fix_phy_mode4 =
2177 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2178 u32 m2, tmp;
2179
2180 if (fix_phy_mode2) {
2181 m2 = readl(port_mmio + PHY_MODE2);
2182 m2 &= ~(1 << 16);
2183 m2 |= (1 << 31);
2184 writel(m2, port_mmio + PHY_MODE2);
2185
2186 udelay(200);
2187
2188 m2 = readl(port_mmio + PHY_MODE2);
2189 m2 &= ~((1 << 16) | (1 << 31));
2190 writel(m2, port_mmio + PHY_MODE2);
2191
2192 udelay(200);
2193 }
2194
2195 /* who knows what this magic does */
2196 tmp = readl(port_mmio + PHY_MODE3);
2197 tmp &= ~0x7F800000;
2198 tmp |= 0x2A800000;
2199 writel(tmp, port_mmio + PHY_MODE3);
2200
2201 if (fix_phy_mode4) {
2202 u32 m4;
2203
2204 m4 = readl(port_mmio + PHY_MODE4);
2205
2206 if (hp_flags & MV_HP_ERRATA_60X1B2)
2207 tmp = readl(port_mmio + 0x310);
2208
2209 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2210
2211 writel(m4, port_mmio + PHY_MODE4);
2212
2213 if (hp_flags & MV_HP_ERRATA_60X1B2)
2214 writel(tmp, port_mmio + 0x310);
2215 }
2216
2217 /* Revert values of pre-emphasis and signal amps to the saved ones */
2218 m2 = readl(port_mmio + PHY_MODE2);
2219
2220 m2 &= ~MV_M2_PREAMP_MASK;
2221 m2 |= hpriv->signal[port].amps;
2222 m2 |= hpriv->signal[port].pre;
2223 m2 &= ~(1 << 16);
2224
2225 /* according to mvSata 3.6.1, some IIE values are fixed */
2226 if (IS_GEN_IIE(hpriv)) {
2227 m2 &= ~0xC30FF01F;
2228 m2 |= 0x0000900F;
2229 }
2230
2231 writel(m2, port_mmio + PHY_MODE2);
2232 }
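
/*
 * Editor's note: the "revert" step above re-applies the values captured by
 * mv6_read_preamp() -- .amps occupies PHY_MODE2 bits 10:8 and .pre bits
 * 7:5 -- so clearing MV_M2_PREAMP_MASK (which presumably covers those
 * fields) and OR-ing both saved values back in is a plain read-modify-write
 * of those two bit-fields.
 */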
2233
2234 /* TODO: use the generic LED interface to configure the SATA Presence */
2235 /* & Activity LEDs on the board */
2236 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2237 void __iomem *mmio)
2238 {
2239 return;
2240 }
2241
2242 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2243 void __iomem *mmio)
2244 {
2245 void __iomem *port_mmio;
2246 u32 tmp;
2247
2248 port_mmio = mv_port_base(mmio, idx);
2249 tmp = readl(port_mmio + PHY_MODE2);
2250
2251 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2252 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2253 }
2254
2255 #undef ZERO
2256 #define ZERO(reg) writel(0, port_mmio + (reg))
2257 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2258 void __iomem *mmio, unsigned int port)
2259 {
2260 void __iomem *port_mmio = mv_port_base(mmio, port);
2261
2262 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2263
2264 mv_channel_reset(hpriv, mmio, port);
2265
2266 ZERO(0x028); /* command */
2267 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2268 ZERO(0x004); /* timer */
2269 ZERO(0x008); /* irq err cause */
2270 ZERO(0x00c); /* irq err mask */
2271 ZERO(0x010); /* rq bah */
2272 ZERO(0x014); /* rq inp */
2273 ZERO(0x018); /* rq outp */
2274 ZERO(0x01c); /* respq bah */
2275 ZERO(0x024); /* respq outp */
2276 ZERO(0x020); /* respq inp */
2277 ZERO(0x02c); /* test control */
2278 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2279 }
2280
2281 #undef ZERO
2282
2283 #define ZERO(reg) writel(0, hc_mmio + (reg))
2284 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2285 void __iomem *mmio)
2286 {
2287 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2288
2289 ZERO(0x00c);
2290 ZERO(0x010);
2291 ZERO(0x014);
2293 }
2294
2295 #undef ZERO
2296
2297 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2298 void __iomem *mmio, unsigned int n_hc)
2299 {
2300 unsigned int port;
2301
2302 for (port = 0; port < hpriv->n_ports; port++)
2303 mv_soc_reset_hc_port(hpriv, mmio, port);
2304
2305 mv_soc_reset_one_hc(hpriv, mmio);
2306
2307 return 0;
2308 }
2309
2310 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2311 void __iomem *mmio)
2312 {
2313 return;
2314 }
2315
2316 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2317 {
2318 return;
2319 }
2320
2321 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2322 unsigned int port_no)
2323 {
2324 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2325
2326 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2327
2328 if (IS_GEN_II(hpriv)) {
2329 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2330 ifctl |= (1 << 7); /* enable gen2i speed */
2331 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2332 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2333 }
2334
2335 udelay(25); /* allow reset propagation */
2336
2337 /* Spec never mentions clearing the bit. Marvell's driver does
2338 * clear the bit, however.
2339 */
2340 writelfl(0, port_mmio + EDMA_CMD_OFS);
2341
2342 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2343
2344 if (IS_GEN_I(hpriv))
2345 mdelay(1);
2346 }
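
/*
 * Editor's aside (illustrative sketch, not part of the driver): the
 * SATA_INTERFACE_CTL programming above is duplicated verbatim in
 * mv_init_host() below.  A hypothetical helper (the name is invented
 * here) that both call sites could share:
 */
#if 0	/* example only, never compiled */
static void example_setup_ifctl(void __iomem *port_mmio)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);

	ifctl |= (1 << 7);			/* enable gen2i speed */
	ifctl = (ifctl & 0xfff) | 0x9b1000;	/* from chip spec */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
}
#endif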
2347
2348 /**
2349 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2350 * @ap: ATA channel to manipulate
2351 *
2352 * Part of this was originally taken from __sata_phy_reset, but the
2353 * routine has since grown msleep() calls and can no longer be used
2354 * at interrupt level.
2355 *
2356 * LOCKING:
2357 * Inherited from caller. May sleep (this routine calls msleep).
2358 */
2359 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2360 unsigned long deadline)
2361 {
2362 struct mv_port_priv *pp = ap->private_data;
2363 struct mv_host_priv *hpriv = ap->host->private_data;
2364 void __iomem *port_mmio = mv_ap_base(ap);
2365 int retry = 5;
2366 u32 sstatus;
2367
2368 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2369
2370 #ifdef DEBUG
2371 {
2372 u32 sstatus, serror, scontrol;
2373
2374 mv_scr_read(ap, SCR_STATUS, &sstatus);
2375 mv_scr_read(ap, SCR_ERROR, &serror);
2376 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2377 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2378 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2379 }
2380 #endif
2381
2382 /* Issue COMRESET via SControl */
2383 comreset_retry:
2384 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2385 msleep(1);
2386
2387 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2388 msleep(20);
2389
2390 do {
2391 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2392 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2393 break;
2394
2395 msleep(1);
2396 } while (time_before(jiffies, deadline));
2397
2398 /* work around errata */
2399 if (IS_GEN_II(hpriv) &&
2400 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2401 (retry-- > 0))
2402 goto comreset_retry;
2403
2404 #ifdef DEBUG
2405 {
2406 u32 sstatus, serror, scontrol;
2407
2408 mv_scr_read(ap, SCR_STATUS, &sstatus);
2409 mv_scr_read(ap, SCR_ERROR, &serror);
2410 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2411 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2412 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2413 }
2414 #endif
2415
2416 if (ata_link_offline(&ap->link)) {
2417 *class = ATA_DEV_NONE;
2418 return;
2419 }
2420
2421 	/* even after SStatus reflects that the device is ready,
2422 	 * it seems to take a while for the link to become fully
2423 	 * established (and thus for Status to stop reading 0x80/0x7F),
2424 	 * so we poll a bit for that here.
2425 */
2426 retry = 20;
2427 while (1) {
2428 u8 drv_stat = ata_check_status(ap);
2429 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2430 break;
2431 msleep(500);
2432 if (retry-- <= 0)
2433 break;
2434 if (time_after(jiffies, deadline))
2435 break;
2436 }
2437
2438 /* FIXME: if we passed the deadline, the following
2439 * code probably produces an invalid result
2440 */
2441
2442 /* finally, read device signature from TF registers */
2443 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2444
2445 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2446
2447 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2448
2449 VPRINTK("EXIT\n");
2450 }
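
/*
 * Editor's aside (illustrative sketch, not part of the driver): the
 * COMRESET handshake used above, distilled.  Writing SControl with DET=1
 * (0x301) starts the reset sequence, DET=0 (0x300) releases it, and
 * SStatus is then polled (masking the low DET bits, as the driver does)
 * until the link settles at 3 (phy established) or 0 (nothing attached).
 */
#if 0	/* example only, never compiled */
static void example_comreset(struct ata_link *link, unsigned long deadline)
{
	u32 sstatus;

	sata_scr_write_flush(link, SCR_CONTROL, 0x301);
	msleep(1);
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0x3) == 3 || (sstatus & 0x3) == 0)
			break;
		msleep(1);
	} while (time_before(jiffies, deadline));
}
#endif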
2451
2452 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2453 {
2454 struct ata_port *ap = link->ap;
2455 struct mv_port_priv *pp = ap->private_data;
2456 struct ata_eh_context *ehc = &link->eh_context;
2457 int rc;
2458
2459 rc = mv_stop_dma(ap);
2460 if (rc)
2461 ehc->i.action |= ATA_EH_HARDRESET;
2462
2463 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2464 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2465 ehc->i.action |= ATA_EH_HARDRESET;
2466 }
2467
2468 /* if we're about to do hardreset, nothing more to do */
2469 if (ehc->i.action & ATA_EH_HARDRESET)
2470 return 0;
2471
2472 if (ata_link_online(link))
2473 rc = ata_wait_ready(ap, deadline);
2474 else
2475 rc = -ENODEV;
2476
2477 return rc;
2478 }
2479
2480 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2481 unsigned long deadline)
2482 {
2483 struct ata_port *ap = link->ap;
2484 struct mv_host_priv *hpriv = ap->host->private_data;
2485 void __iomem *mmio = hpriv->base;
2486
2487 mv_stop_dma(ap);
2488
2489 mv_channel_reset(hpriv, mmio, ap->port_no);
2490
2491 mv_phy_reset(ap, class, deadline);
2492
2493 return 0;
2494 }
2495
2496 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2497 {
2498 struct ata_port *ap = link->ap;
2499 u32 serr;
2500
2501 /* print link status */
2502 sata_print_link_status(link);
2503
2504 /* clear SError */
2505 sata_scr_read(link, SCR_ERROR, &serr);
2506 sata_scr_write_flush(link, SCR_ERROR, serr);
2507
2508 /* bail out if no device is present */
2509 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2510 DPRINTK("EXIT, no device\n");
2511 return;
2512 }
2513
2514 /* set up device control */
2515 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2516 }
2517
2518 static void mv_error_handler(struct ata_port *ap)
2519 {
2520 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2521 mv_hardreset, mv_postreset);
2522 }
2523
2524 static void mv_eh_freeze(struct ata_port *ap)
2525 {
2526 struct mv_host_priv *hpriv = ap->host->private_data;
2527 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2528 u32 tmp, mask;
2529 unsigned int shift;
2530
2531 /* FIXME: handle coalescing completion events properly */
2532
2533 shift = ap->port_no * 2;
2534 if (hc > 0)
2535 shift++;
2536
2537 mask = 0x3 << shift;
2538
2539 /* disable assertion of portN err, done events */
2540 tmp = readl(hpriv->main_mask_reg_addr);
2541 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2542 }
2543
2544 static void mv_eh_thaw(struct ata_port *ap)
2545 {
2546 struct mv_host_priv *hpriv = ap->host->private_data;
2547 void __iomem *mmio = hpriv->base;
2548 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2549 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2550 void __iomem *port_mmio = mv_ap_base(ap);
2551 u32 tmp, mask, hc_irq_cause;
2552 unsigned int shift, hc_port_no = ap->port_no;
2553
2554 /* FIXME: handle coalescing completion events properly */
2555
2556 shift = ap->port_no * 2;
2557 if (hc > 0) {
2558 shift++;
2559 hc_port_no -= 4;
2560 }
2561
2562 mask = 0x3 << shift;
2563
2564 /* clear EDMA errors on this port */
2565 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2566
2567 /* clear pending irq events */
2568 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2569 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2570 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2571 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2572
2573 /* enable assertion of portN err, done events */
2574 tmp = readl(hpriv->main_mask_reg_addr);
2575 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2576 }
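
/*
 * Editor's note: worked examples of the freeze/thaw mask computed above
 * (same main-cause layout as in mv_host_intr()):
 *   port 0 -> shift 0  -> mask 0x00000003 (err/done bits 0-1)
 *   port 3 -> shift 6  -> mask 0x000000c0 (bits 6-7)
 *   port 4 -> shift 9  -> mask 0x00000600 (bits 9-10; bit 8 skipped)
 *   port 7 -> shift 15 -> mask 0x00018000 (bits 15-16)
 */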
2577
2578 /**
2579 * mv_port_init - Perform some early initialization on a single port.
2580 * @port: libata data structure storing shadow register addresses
2581 * @port_mmio: base address of the port
2582 *
2583 * Initialize shadow register mmio addresses, clear outstanding
2584 * interrupts on the port, and unmask interrupts for the future
2585 * start of the port.
2586 *
2587 * LOCKING:
2588 * Inherited from caller.
2589 */
2590 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2591 {
2592 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2593 unsigned serr_ofs;
2594
2595 	/* PIO related setup */
2597 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2598 port->error_addr =
2599 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2600 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2601 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2602 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2603 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2604 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2605 port->status_addr =
2606 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2607 /* special case: control/altstatus doesn't have ATA_REG_ address */
2608 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2609
2610 /* unused: */
2611 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2612
2613 /* Clear any currently outstanding port interrupt conditions */
2614 serr_ofs = mv_scr_offset(SCR_ERROR);
2615 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2616 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2617
2618 /* unmask all non-transient EDMA error interrupts */
2619 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2620
2621 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2622 readl(port_mmio + EDMA_CFG_OFS),
2623 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2624 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2625 }
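
/*
 * Editor's note: with the standard ATA_REG_* numbering (0..7), the shadow
 * block mapping set up above works out to these offsets from shd_base:
 *   data 0x00, err/feature 0x04, nsect 0x08, lbal 0x0c,
 *   lbam 0x10, lbah 0x14, device 0x18, status/command 0x1c,
 * with ctl/altstatus at the separate SHD_CTL_AST_OFS offset.
 */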
2626
2627 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2628 {
2629 struct pci_dev *pdev = to_pci_dev(host->dev);
2630 struct mv_host_priv *hpriv = host->private_data;
2631 u32 hp_flags = hpriv->hp_flags;
2632
2633 switch (board_idx) {
2634 case chip_5080:
2635 hpriv->ops = &mv5xxx_ops;
2636 hp_flags |= MV_HP_GEN_I;
2637
2638 switch (pdev->revision) {
2639 case 0x1:
2640 hp_flags |= MV_HP_ERRATA_50XXB0;
2641 break;
2642 case 0x3:
2643 hp_flags |= MV_HP_ERRATA_50XXB2;
2644 break;
2645 default:
2646 dev_printk(KERN_WARNING, &pdev->dev,
2647 "Applying 50XXB2 workarounds to unknown rev\n");
2648 hp_flags |= MV_HP_ERRATA_50XXB2;
2649 break;
2650 }
2651 break;
2652
2653 case chip_504x:
2654 case chip_508x:
2655 hpriv->ops = &mv5xxx_ops;
2656 hp_flags |= MV_HP_GEN_I;
2657
2658 switch (pdev->revision) {
2659 case 0x0:
2660 hp_flags |= MV_HP_ERRATA_50XXB0;
2661 break;
2662 case 0x3:
2663 hp_flags |= MV_HP_ERRATA_50XXB2;
2664 break;
2665 default:
2666 dev_printk(KERN_WARNING, &pdev->dev,
2667 "Applying B2 workarounds to unknown rev\n");
2668 hp_flags |= MV_HP_ERRATA_50XXB2;
2669 break;
2670 }
2671 break;
2672
2673 case chip_604x:
2674 case chip_608x:
2675 hpriv->ops = &mv6xxx_ops;
2676 hp_flags |= MV_HP_GEN_II;
2677
2678 switch (pdev->revision) {
2679 case 0x7:
2680 hp_flags |= MV_HP_ERRATA_60X1B2;
2681 break;
2682 case 0x9:
2683 hp_flags |= MV_HP_ERRATA_60X1C0;
2684 break;
2685 default:
2686 dev_printk(KERN_WARNING, &pdev->dev,
2687 "Applying B2 workarounds to unknown rev\n");
2688 hp_flags |= MV_HP_ERRATA_60X1B2;
2689 break;
2690 }
2691 break;
2692
2693 case chip_7042:
2694 hp_flags |= MV_HP_PCIE;
2695 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2696 (pdev->device == 0x2300 || pdev->device == 0x2310))
2697 {
2698 /*
2699 * Highpoint RocketRAID PCIe 23xx series cards:
2700 *
2701 * Unconfigured drives are treated as "Legacy"
2702 * by the BIOS, and it overwrites sector 8 with
2703 * a "Lgcy" metadata block prior to Linux boot.
2704 *
2705 * Configured drives (RAID or JBOD) leave sector 8
2706 * alone, but instead overwrite a high numbered
2707 * sector for the RAID metadata. This sector can
2708 * be determined exactly, by truncating the physical
2709 * drive capacity to a nice even GB value.
2710 *
2711 			 * RAID metadata is at: (dev->n_sectors & ~0xfffff);
2712 			 * a worked example follows mv_chip_id() below.
2713 * Warn the user, lest they think we're just buggy.
2714 */
2715 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2716 " BIOS CORRUPTS DATA on all attached drives,"
2717 " regardless of if/how they are configured."
2718 " BEWARE!\n");
2719 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2720 " use sectors 8-9 on \"Legacy\" drives,"
2721 " and avoid the final two gigabytes on"
2722 " all RocketRAID BIOS initialized drives.\n");
2723 }
		/* FALLTHRU: chip_7042 shares the Gen-IIE setup below */
2724 	case chip_6042:
2725 hpriv->ops = &mv6xxx_ops;
2726 hp_flags |= MV_HP_GEN_IIE;
2727
2728 switch (pdev->revision) {
2729 case 0x0:
2730 hp_flags |= MV_HP_ERRATA_XX42A0;
2731 break;
2732 case 0x1:
2733 hp_flags |= MV_HP_ERRATA_60X1C0;
2734 break;
2735 default:
2736 dev_printk(KERN_WARNING, &pdev->dev,
2737 "Applying 60X1C0 workarounds to unknown rev\n");
2738 hp_flags |= MV_HP_ERRATA_60X1C0;
2739 break;
2740 }
2741 break;
2742 case chip_soc:
2743 hpriv->ops = &mv_soc_ops;
2744 hp_flags |= MV_HP_ERRATA_60X1C0;
2745 break;
2746
2747 default:
2748 dev_printk(KERN_ERR, host->dev,
2749 "BUG: invalid board index %u\n", board_idx);
2750 return 1;
2751 }
2752
2753 hpriv->hp_flags = hp_flags;
2754 if (hp_flags & MV_HP_PCIE) {
2755 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2756 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2757 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2758 } else {
2759 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2760 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2761 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2762 }
2763
2764 return 0;
2765 }
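
/*
 * Editor's note (the worked example promised in the RocketRAID comment
 * above): 0xfffff is 1048575 sectors, so "& ~0xfffff" rounds the capacity
 * down to a multiple of 0x100000 sectors, i.e. 512 MiB.  For a 500 GB
 * drive of 976773168 sectors (0x3a386030), the metadata would start at
 * sector 0x3a300000 = 976224256 -- roughly 268 MiB below end-of-disk --
 * which is why the printk advises avoiding the final gigabytes.
 */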
2766
2767 /**
2768 * mv_init_host - Perform some early initialization of the host.
2769 * @host: ATA host to initialize
2770 * @board_idx: controller index
2771 *
2772 * If possible, do an early global reset of the host. Then do
2773 * our port init and clear/unmask all/relevant host interrupts.
2774 *
2775 * LOCKING:
2776 * Inherited from caller.
2777 */
2778 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2779 {
2780 int rc = 0, n_hc, port, hc;
2781 struct mv_host_priv *hpriv = host->private_data;
2782 void __iomem *mmio = hpriv->base;
2783
2784 rc = mv_chip_id(host, board_idx);
2785 if (rc)
2786 goto done;
2787
2788 if (HAS_PCI(host)) {
2789 hpriv->main_cause_reg_addr = hpriv->base +
2790 HC_MAIN_IRQ_CAUSE_OFS;
2791 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2792 } else {
2793 hpriv->main_cause_reg_addr = hpriv->base +
2794 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2795 hpriv->main_mask_reg_addr = hpriv->base +
2796 HC_SOC_MAIN_IRQ_MASK_OFS;
2797 }
2798 /* global interrupt mask */
2799 writel(0, hpriv->main_mask_reg_addr);
2800
2801 n_hc = mv_get_hc_count(host->ports[0]->flags);
2802
2803 for (port = 0; port < host->n_ports; port++)
2804 hpriv->ops->read_preamp(hpriv, port, mmio);
2805
2806 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2807 if (rc)
2808 goto done;
2809
2810 hpriv->ops->reset_flash(hpriv, mmio);
2811 hpriv->ops->reset_bus(host, mmio);
2812 hpriv->ops->enable_leds(hpriv, mmio);
2813
2814 for (port = 0; port < host->n_ports; port++) {
2815 if (IS_GEN_II(hpriv)) {
2816 void __iomem *port_mmio = mv_port_base(mmio, port);
2817
2818 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2819 ifctl |= (1 << 7); /* enable gen2i speed */
2820 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2821 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2822 }
2823
2824 hpriv->ops->phy_errata(hpriv, mmio, port);
2825 }
2826
2827 for (port = 0; port < host->n_ports; port++) {
2828 struct ata_port *ap = host->ports[port];
2829 void __iomem *port_mmio = mv_port_base(mmio, port);
2830
2831 mv_port_init(&ap->ioaddr, port_mmio);
2832
2833 #ifdef CONFIG_PCI
2834 if (HAS_PCI(host)) {
2835 unsigned int offset = port_mmio - mmio;
2836 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2837 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2838 }
2839 #endif
2840 }
2841
2842 for (hc = 0; hc < n_hc; hc++) {
2843 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2844
2845 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2846 "(before clear)=0x%08x\n", hc,
2847 readl(hc_mmio + HC_CFG_OFS),
2848 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2849
2850 /* Clear any currently outstanding hc interrupt conditions */
2851 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2852 }
2853
2854 if (HAS_PCI(host)) {
2855 /* Clear any currently outstanding host interrupt conditions */
2856 writelfl(0, mmio + hpriv->irq_cause_ofs);
2857
2858 /* and unmask interrupt generation for host regs */
2859 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2860 if (IS_GEN_I(hpriv))
2861 writelfl(~HC_MAIN_MASKED_IRQS_5,
2862 hpriv->main_mask_reg_addr);
2863 else
2864 writelfl(~HC_MAIN_MASKED_IRQS,
2865 hpriv->main_mask_reg_addr);
2866
2867 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2868 "PCI int cause/mask=0x%08x/0x%08x\n",
2869 readl(hpriv->main_cause_reg_addr),
2870 readl(hpriv->main_mask_reg_addr),
2871 readl(mmio + hpriv->irq_cause_ofs),
2872 readl(mmio + hpriv->irq_mask_ofs));
2873 } else {
2874 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2875 hpriv->main_mask_reg_addr);
2876 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2877 readl(hpriv->main_cause_reg_addr),
2878 readl(hpriv->main_mask_reg_addr));
2879 }
2880 done:
2881 return rc;
2882 }
2883
2884 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2885 {
2886 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2887 MV_CRQB_Q_SZ, 0);
2888 if (!hpriv->crqb_pool)
2889 return -ENOMEM;
2890
2891 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2892 MV_CRPB_Q_SZ, 0);
2893 if (!hpriv->crpb_pool)
2894 return -ENOMEM;
2895
2896 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2897 MV_SG_TBL_SZ, 0);
2898 if (!hpriv->sg_tbl_pool)
2899 return -ENOMEM;
2900
2901 return 0;
2902 }
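
/*
 * Editor's aside (illustrative sketch, not part of the driver): drawing
 * from one of these pools.  dma_pool_alloc() returns the CPU pointer and
 * fills in the bus address in one call; since the pools were created with
 * dmam_pool_create(), they are destroyed automatically with the device.
 */
#if 0	/* example only, never compiled */
static void *example_alloc_crqb(struct mv_host_priv *hpriv,
				dma_addr_t *dma_handle)
{
	return dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, dma_handle);
}
#endif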
2903
2904 /**
2905 * mv_platform_probe - handle a positive probe of an SoC Marvell
2906 * host
2907 * @pdev: platform device found
2908 *
2909 * LOCKING:
2910 * Inherited from caller.
2911 */
2912 static int mv_platform_probe(struct platform_device *pdev)
2913 {
2914 static int printed_version;
2915 const struct mv_sata_platform_data *mv_platform_data;
2916 const struct ata_port_info *ppi[] =
2917 { &mv_port_info[chip_soc], NULL };
2918 struct ata_host *host;
2919 struct mv_host_priv *hpriv;
2920 struct resource *res;
2921 int n_ports, rc;
2922
2923 if (!printed_version++)
2924 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2925
2926 /*
2927 	 * Simple resource validation ...
2928 */
2929 if (unlikely(pdev->num_resources != 2)) {
2930 dev_err(&pdev->dev, "invalid number of resources\n");
2931 return -EINVAL;
2932 }
2933
2934 /*
2935 * Get the register base first
2936 */
2937 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2938 if (res == NULL)
2939 return -EINVAL;
2940
2941 /* allocate host */
2942 mv_platform_data = pdev->dev.platform_data;
2943 n_ports = mv_platform_data->n_ports;
2944
2945 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2946 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2947
2948 if (!host || !hpriv)
2949 return -ENOMEM;
2950 host->private_data = hpriv;
2951 hpriv->n_ports = n_ports;
2952
2953 host->iomap = NULL;
2954 hpriv->base = ioremap(res->start, res->end - res->start + 1);
2955 hpriv->base -= MV_SATAHC0_REG_BASE;
2956
2957 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2958 if (rc)
2959 return rc;
2960
2961 /* initialize adapter */
2962 rc = mv_init_host(host, chip_soc);
2963 if (rc)
2964 return rc;
2965
2966 dev_printk(KERN_INFO, &pdev->dev,
2967 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2968 host->n_ports);
2969
2970 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2971 IRQF_SHARED, &mv6_sht);
2972 }
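
/*
 * Editor's aside (illustrative sketch, not part of the driver): what a
 * board file might hand to the probe routine above.  The .n_ports field
 * matches mv_platform_data->n_ports used in the probe; the register base,
 * window size and IRQ number below are placeholders, and the two-entry
 * resource array is what the "num_resources != 2" check expects.
 */
#if 0	/* example only, never compiled */
static struct mv_sata_platform_data example_sata_data = {
	.n_ports	= 4,
};

static struct resource example_sata_resources[] = {
	{
		.start	= 0xf1080000,		/* placeholder base */
		.end	= 0xf1080000 + 0x5000 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 21,			/* placeholder IRQ */
		.end	= 21,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_sata_device = {
	.name		= DRV_NAME,	/* matches mv_platform_driver */
	.id		= 0,
	.resource	= example_sata_resources,
	.num_resources	= ARRAY_SIZE(example_sata_resources),
	.dev = {
		.platform_data = &example_sata_data,
	},
};
#endif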
2973
2974 /**
2975 * mv_platform_remove - unplug a platform interface
2977 * @pdev: platform device
2978 *
2979 * A platform bus SATA device has been unplugged. Perform the needed
2980 * cleanup. Also called on module unload for any active devices.
2981 */
2982 static int __devexit mv_platform_remove(struct platform_device *pdev)
2983 {
2984 struct device *dev = &pdev->dev;
2985 struct ata_host *host = dev_get_drvdata(dev);
2986 struct mv_host_priv *hpriv = host->private_data;
2987 void __iomem *base = hpriv->base;
2988
2989 ata_host_detach(host);
2990 iounmap(base);
2991 return 0;
2992 }
2993
2994 static struct platform_driver mv_platform_driver = {
2995 .probe = mv_platform_probe,
2996 .remove = __devexit_p(mv_platform_remove),
2997 .driver = {
2998 .name = DRV_NAME,
2999 .owner = THIS_MODULE,
3000 },
3001 };
3002
3003
3004 #ifdef CONFIG_PCI
3005 static int mv_pci_init_one(struct pci_dev *pdev,
3006 const struct pci_device_id *ent);
3007
3008
3009 static struct pci_driver mv_pci_driver = {
3010 .name = DRV_NAME,
3011 .id_table = mv_pci_tbl,
3012 .probe = mv_pci_init_one,
3013 .remove = ata_pci_remove_one,
3014 };
3015
3016 /*
3017 * module options
3018 */
3019 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
3020
3021
3022 /* move to PCI layer or libata core? */
3023 static int pci_go_64(struct pci_dev *pdev)
3024 {
3025 int rc;
3026
3027 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3028 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3029 if (rc) {
3030 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3031 if (rc) {
3032 dev_printk(KERN_ERR, &pdev->dev,
3033 "64-bit DMA enable failed\n");
3034 return rc;
3035 }
3036 }
3037 } else {
3038 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3039 if (rc) {
3040 dev_printk(KERN_ERR, &pdev->dev,
3041 "32-bit DMA enable failed\n");
3042 return rc;
3043 }
3044 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3045 if (rc) {
3046 dev_printk(KERN_ERR, &pdev->dev,
3047 "32-bit consistent DMA enable failed\n");
3048 return rc;
3049 }
3050 }
3051
3052 return rc;
3053 }
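
/*
 * Editor's note: the fallback ladder in pci_go_64() above, in table form:
 *   64-bit streaming ok, 64-bit coherent ok   -> 64/64
 *   64-bit streaming ok, 64-bit coherent fail -> 64-bit streaming,
 *                                                32-bit coherent
 *   64-bit streaming fail                     -> 32/32, or error out
 */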
3054
3055 /**
3056 * mv_print_info - Dump key info to kernel log for perusal.
3057 * @host: ATA host to print info about
3058 *
3059 * FIXME: complete this.
3060 *
3061 * LOCKING:
3062 * Inherited from caller.
3063 */
3064 static void mv_print_info(struct ata_host *host)
3065 {
3066 struct pci_dev *pdev = to_pci_dev(host->dev);
3067 struct mv_host_priv *hpriv = host->private_data;
3068 u8 scc;
3069 const char *scc_s, *gen;
3070
3071 	/* Read the PCI class code so the printout below can report whether
3072 	 * the chip presents itself as a SCSI or a RAID controller.
3073 	 */
3074 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3075 if (scc == 0)
3076 scc_s = "SCSI";
3077 else if (scc == 0x01)
3078 scc_s = "RAID";
3079 else
3080 scc_s = "?";
3081
3082 if (IS_GEN_I(hpriv))
3083 gen = "I";
3084 else if (IS_GEN_II(hpriv))
3085 gen = "II";
3086 else if (IS_GEN_IIE(hpriv))
3087 gen = "IIE";
3088 else
3089 gen = "?";
3090
3091 dev_printk(KERN_INFO, &pdev->dev,
3092 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3093 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3094 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3095 }
3096
3097 /**
3098 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3099 * @pdev: PCI device found
3100 * @ent: PCI device ID entry for the matched host
3101 *
3102 * LOCKING:
3103 * Inherited from caller.
3104 */
3105 static int mv_pci_init_one(struct pci_dev *pdev,
3106 const struct pci_device_id *ent)
3107 {
3108 static int printed_version;
3109 unsigned int board_idx = (unsigned int)ent->driver_data;
3110 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3111 struct ata_host *host;
3112 struct mv_host_priv *hpriv;
3113 int n_ports, rc;
3114
3115 if (!printed_version++)
3116 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3117
3118 /* allocate host */
3119 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3120
3121 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3122 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3123 if (!host || !hpriv)
3124 return -ENOMEM;
3125 host->private_data = hpriv;
3126 hpriv->n_ports = n_ports;
3127
3128 /* acquire resources */
3129 rc = pcim_enable_device(pdev);
3130 if (rc)
3131 return rc;
3132
3133 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3134 if (rc == -EBUSY)
3135 pcim_pin_device(pdev);
3136 if (rc)
3137 return rc;
3138 host->iomap = pcim_iomap_table(pdev);
3139 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3140
3141 rc = pci_go_64(pdev);
3142 if (rc)
3143 return rc;
3144
3145 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3146 if (rc)
3147 return rc;
3148
3149 /* initialize adapter */
3150 rc = mv_init_host(host, board_idx);
3151 if (rc)
3152 return rc;
3153
3154 /* Enable interrupts */
3155 if (msi && pci_enable_msi(pdev))
3156 pci_intx(pdev, 1);
3157
3158 mv_dump_pci_cfg(pdev, 0x68);
3159 mv_print_info(host);
3160
3161 pci_set_master(pdev);
3162 pci_try_set_mwi(pdev);
3163 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3164 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3165 }
3166 #endif
3167
3168 static int mv_platform_probe(struct platform_device *pdev);
3169 static int __devexit mv_platform_remove(struct platform_device *pdev);
3170
3171 static int __init mv_init(void)
3172 {
3173 int rc = -ENODEV;
3174 #ifdef CONFIG_PCI
3175 rc = pci_register_driver(&mv_pci_driver);
3176 if (rc < 0)
3177 return rc;
3178 #endif
3179 rc = platform_driver_register(&mv_platform_driver);
3180
3181 #ifdef CONFIG_PCI
3182 if (rc < 0)
3183 pci_unregister_driver(&mv_pci_driver);
3184 #endif
3185 return rc;
3186 }
3187
3188 static void __exit mv_exit(void)
3189 {
3190 #ifdef CONFIG_PCI
3191 pci_unregister_driver(&mv_pci_driver);
3192 #endif
3193 platform_driver_unregister(&mv_platform_driver);
3194 }
3195
3196 MODULE_AUTHOR("Brett Russ");
3197 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3198 MODULE_LICENSE("GPL");
3199 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3200 MODULE_VERSION(DRV_VERSION);
3201
3202 #ifdef CONFIG_PCI
3203 module_param(msi, int, 0444);
3204 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3205 #endif
3206
3207 module_init(mv_init);
3208 module_exit(mv_exit);