1 /*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember that a couple of workarounds (one related to PCI-X)
30 are still needed.
31
32 2) Improve/fix IRQ and error handling sequences.
33
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
39
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41
42 6) Add port multiplier support (intermediate)
43
44 7) Develop a low-power-consumption strategy, and implement it.
45
46 8) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
49
50 9) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not
53 worth the latency cost.
54
55 10) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
58
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
61
62 */
63
64
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
81
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
84
85 enum {
86 /* BARs are enumerated in pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
102 MV_SATAHC0_REG_BASE = 0x20000,
103 MV_FLASH_CTL = 0x1046c,
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
121 MV_MAX_SG_CT = 256,
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
123
124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
135
136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
140
141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
148
149 CRPB_FLAG_STATUS_SHIFT = 8,
150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
152
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
154
155 /* PCI interface registers */
156
157 PCI_COMMAND_OFS = 0xc00,
158
159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
163
164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
174
175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
178
179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
182
183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
209
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
212
213 HC_IRQ_CAUSE_OFS = 0x14,
214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
217
218 /* Shadow block registers */
219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
221
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
226 PHY_MODE3 = 0x310,
227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
232 SATA_INTERFACE_CTL = 0x050,
233
234 MV_M2_PREAMP_MASK = 0x7e0,
235
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
243
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
260
261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
266
267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
268
269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
275
276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
277
278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
281
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
286
287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
293 EDMA_ERR_CRQB_PAR |
294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
308 EDMA_ERR_CRQB_PAR |
309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
312
313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
315
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
318
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
322 EDMA_RSP_Q_PTR_SHIFT = 3,
323
324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
328
329 EDMA_IORDY_TMOUT = 0x34,
330 EDMA_ARB_CFG = 0x38,
331
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
338 MV_HP_ERRATA_XX42A0 = (1 << 5),
339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
343
344 /* Port private flags (pp_flags) */
345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
348 };
349
350 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
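/* SoC-integrated controllers (MV_FLAG_SOC) have no PCI interface */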
353 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
354
355 enum {
356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill_sg().
358 */
359 MV_DMA_BOUNDARY = 0xffffU,
360
361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
363 */
364 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
365
366 /* ditto, for response queue */
367 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
368 };
369
370 enum chip_type {
371 chip_504x,
372 chip_508x,
373 chip_5080,
374 chip_604x,
375 chip_608x,
376 chip_6042,
377 chip_7042,
378 chip_soc,
379 };
380
381 /* Command ReQuest Block: 32B */
382 struct mv_crqb {
383 __le32 sg_addr;
384 __le32 sg_addr_hi;
385 __le16 ctrl_flags;
386 __le16 ata_cmd[11];
387 };
388
389 struct mv_crqb_iie {
390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
395 };
396
397 /* Command ResPonse Block: 8B */
398 struct mv_crpb {
399 __le16 id;
400 __le16 flags;
401 __le32 tmstmp;
402 };
403
404 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405 struct mv_sg {
406 __le32 addr;
407 __le32 flags_size;
408 __le32 addr_hi;
409 __le32 reserved;
410 };
411
412 struct mv_port_priv {
413 struct mv_crqb *crqb;
414 dma_addr_t crqb_dma;
415 struct mv_crpb *crpb;
416 dma_addr_t crpb_dma;
417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
419
420 unsigned int req_idx;
421 unsigned int resp_idx;
422
423 u32 pp_flags;
424 };
425
426 struct mv_port_signal {
427 u32 amps;
428 u32 pre;
429 };
430
431 struct mv_host_priv {
432 u32 hp_flags;
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
435 int n_ports;
436 void __iomem *base;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
439 u32 irq_cause_ofs;
440 u32 irq_mask_ofs;
441 u32 unmask_all_irqs;
442 /*
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
446 */
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
450 };
451
452 struct mv_hw_ops {
453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
462 };
463
464 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
468 static int mv_port_start(struct ata_port *ap);
469 static void mv_port_stop(struct ata_port *ap);
470 static void mv_qc_prep(struct ata_queued_cmd *qc);
471 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
472 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
473 static int mv_prereset(struct ata_link *link, unsigned long deadline);
474 static int mv_hardreset(struct ata_link *link, unsigned int *class,
475 unsigned long deadline);
476 static void mv_postreset(struct ata_link *link, unsigned int *classes);
477 static void mv_eh_freeze(struct ata_port *ap);
478 static void mv_eh_thaw(struct ata_port *ap);
479 static void mv6_dev_config(struct ata_device *dev);
480
481 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
482 unsigned int port);
483 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
484 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
485 void __iomem *mmio);
486 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
487 unsigned int n_hc);
488 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
489 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
490
491 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
492 unsigned int port);
493 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
494 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
495 void __iomem *mmio);
496 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
497 unsigned int n_hc);
498 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
499 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
500 void __iomem *mmio);
501 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
502 void __iomem *mmio);
503 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
504 void __iomem *mmio, unsigned int n_hc);
505 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
506 void __iomem *mmio);
507 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
508 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
509 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
510 unsigned int port_no);
511 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
512 void __iomem *port_mmio, int want_ncq);
513 static int __mv_stop_dma(struct ata_port *ap);
514
515 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
516 * because we have to allow room for worst case splitting of
517 * PRDs for 64K boundaries in mv_fill_sg().
518 */
519 static struct scsi_host_template mv5_sht = {
520 ATA_BASE_SHT(DRV_NAME),
521 .sg_tablesize = MV_MAX_SG_CT / 2,
522 .dma_boundary = MV_DMA_BOUNDARY,
523 };
524
525 static struct scsi_host_template mv6_sht = {
526 ATA_NCQ_SHT(DRV_NAME),
527 .can_queue = MV_MAX_Q_DEPTH - 1,
528 .sg_tablesize = MV_MAX_SG_CT / 2,
529 .dma_boundary = MV_DMA_BOUNDARY,
530 };
531
532 static struct ata_port_operations mv5_ops = {
533 .inherits = &ata_sff_port_ops,
534
535 .qc_prep = mv_qc_prep,
536 .qc_issue = mv_qc_issue,
537
538 .freeze = mv_eh_freeze,
539 .thaw = mv_eh_thaw,
540 .prereset = mv_prereset,
541 .hardreset = mv_hardreset,
542 .postreset = mv_postreset,
543 .error_handler = ata_std_error_handler, /* avoid SFF EH */
544 .post_internal_cmd = ATA_OP_NULL,
545
546 .scr_read = mv5_scr_read,
547 .scr_write = mv5_scr_write,
548
549 .port_start = mv_port_start,
550 .port_stop = mv_port_stop,
551 };
552
553 static struct ata_port_operations mv6_ops = {
554 .inherits = &mv5_ops,
555 .qc_defer = ata_std_qc_defer,
556 .dev_config = mv6_dev_config,
557 .scr_read = mv_scr_read,
558 .scr_write = mv_scr_write,
559 };
560
561 static struct ata_port_operations mv_iie_ops = {
562 .inherits = &mv6_ops,
563 .dev_config = ATA_OP_NULL,
564 .qc_prep = mv_qc_prep_iie,
565 };
566
567 static const struct ata_port_info mv_port_info[] = {
568 { /* chip_504x */
569 .flags = MV_COMMON_FLAGS,
570 .pio_mask = 0x1f, /* pio0-4 */
571 .udma_mask = ATA_UDMA6,
572 .port_ops = &mv5_ops,
573 },
574 { /* chip_508x */
575 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
576 .pio_mask = 0x1f, /* pio0-4 */
577 .udma_mask = ATA_UDMA6,
578 .port_ops = &mv5_ops,
579 },
580 { /* chip_5080 */
581 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
582 .pio_mask = 0x1f, /* pio0-4 */
583 .udma_mask = ATA_UDMA6,
584 .port_ops = &mv5_ops,
585 },
586 { /* chip_604x */
587 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
588 ATA_FLAG_NCQ,
589 .pio_mask = 0x1f, /* pio0-4 */
590 .udma_mask = ATA_UDMA6,
591 .port_ops = &mv6_ops,
592 },
593 { /* chip_608x */
594 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
595 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
596 .pio_mask = 0x1f, /* pio0-4 */
597 .udma_mask = ATA_UDMA6,
598 .port_ops = &mv6_ops,
599 },
600 { /* chip_6042 */
601 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
602 ATA_FLAG_NCQ,
603 .pio_mask = 0x1f, /* pio0-4 */
604 .udma_mask = ATA_UDMA6,
605 .port_ops = &mv_iie_ops,
606 },
607 { /* chip_7042 */
608 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
609 ATA_FLAG_NCQ,
610 .pio_mask = 0x1f, /* pio0-4 */
611 .udma_mask = ATA_UDMA6,
612 .port_ops = &mv_iie_ops,
613 },
614 { /* chip_soc */
615 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
616 .pio_mask = 0x1f, /* pio0-4 */
617 .udma_mask = ATA_UDMA6,
618 .port_ops = &mv_iie_ops,
619 },
620 };
621
622 static const struct pci_device_id mv_pci_tbl[] = {
623 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
624 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
625 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
626 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
627 /* RocketRAID 1740/174x have different identifiers */
628 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
629 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
630
631 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
632 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
633 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
634 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
635 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
636
637 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
638
639 /* Adaptec 1430SA */
640 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
641
642 /* Marvell 7042 support */
643 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
644
645 /* Highpoint RocketRAID PCIe series */
646 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
647 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
648
649 { } /* terminate list */
650 };
651
652 static const struct mv_hw_ops mv5xxx_ops = {
653 .phy_errata = mv5_phy_errata,
654 .enable_leds = mv5_enable_leds,
655 .read_preamp = mv5_read_preamp,
656 .reset_hc = mv5_reset_hc,
657 .reset_flash = mv5_reset_flash,
658 .reset_bus = mv5_reset_bus,
659 };
660
661 static const struct mv_hw_ops mv6xxx_ops = {
662 .phy_errata = mv6_phy_errata,
663 .enable_leds = mv6_enable_leds,
664 .read_preamp = mv6_read_preamp,
665 .reset_hc = mv6_reset_hc,
666 .reset_flash = mv6_reset_flash,
667 .reset_bus = mv_reset_pci_bus,
668 };
669
670 static const struct mv_hw_ops mv_soc_ops = {
671 .phy_errata = mv6_phy_errata,
672 .enable_leds = mv_soc_enable_leds,
673 .read_preamp = mv_soc_read_preamp,
674 .reset_hc = mv_soc_reset_hc,
675 .reset_flash = mv_soc_reset_flash,
676 .reset_bus = mv_soc_reset_bus,
677 };
678
679 /*
680 * Functions
681 */
682
683 static inline void writelfl(unsigned long data, void __iomem *addr)
684 {
685 writel(data, addr);
686 (void) readl(addr); /* flush to avoid PCI posted write */
687 }
688
689 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
690 {
691 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
692 }
693
694 static inline unsigned int mv_hc_from_port(unsigned int port)
695 {
696 return port >> MV_PORT_HC_SHIFT;
697 }
698
699 static inline unsigned int mv_hardport_from_port(unsigned int port)
700 {
701 return port & MV_PORT_MASK;
702 }
703
704 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
705 unsigned int port)
706 {
707 return mv_hc_base(base, mv_hc_from_port(port));
708 }
709
710 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
711 {
712 return mv_hc_base_from_port(base, port) +
713 MV_SATAHC_ARBTR_REG_SZ +
714 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
715 }
716
717 static inline void __iomem *mv_host_base(struct ata_host *host)
718 {
719 struct mv_host_priv *hpriv = host->private_data;
720 return hpriv->base;
721 }
722
723 static inline void __iomem *mv_ap_base(struct ata_port *ap)
724 {
725 return mv_port_base(mv_host_base(ap->host), ap->port_no);
726 }
727
728 static inline int mv_get_hc_count(unsigned long port_flags)
729 {
730 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
731 }
732
733 static void mv_set_edma_ptrs(void __iomem *port_mmio,
734 struct mv_host_priv *hpriv,
735 struct mv_port_priv *pp)
736 {
737 u32 index;
738
739 /*
740 * initialize request queue
741 */
742 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
743
744 WARN_ON(pp->crqb_dma & 0x3ff);
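/* upper 32 address bits; the double 16-bit shift avoids an undefined 32-bit shift when dma_addr_t is only 32 bits wide */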
745 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
746 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
747 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
748
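/* on XX42A0 parts the out-pointer register write also carries the low queue-base address bits */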
749 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
750 writelfl((pp->crqb_dma & 0xffffffff) | index,
751 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
752 else
753 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
754
755 /*
756 * initialize response queue
757 */
758 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
759
760 WARN_ON(pp->crpb_dma & 0xff);
761 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
762
763 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
764 writelfl((pp->crpb_dma & 0xffffffff) | index,
765 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
766 else
767 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
768
769 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
770 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
771 }
772
773 /**
774 * mv_start_dma - Enable eDMA engine
775 * @port_mmio: port base address
776 * @pp: port private data
777 *
778 * Verify the local cache of the eDMA state is accurate with a
779 * WARN_ON.
780 *
781 * LOCKING:
782 * Inherited from caller.
783 */
784 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
785 struct mv_port_priv *pp, u8 protocol)
786 {
787 int want_ncq = (protocol == ATA_PROT_NCQ);
788
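/* if EDMA is already running but in the wrong mode (NCQ vs. non-NCQ), stop it so it can be reconfigured below */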
789 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
790 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
791 if (want_ncq != using_ncq)
792 __mv_stop_dma(ap);
793 }
794 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
795 struct mv_host_priv *hpriv = ap->host->private_data;
796 int hard_port = mv_hardport_from_port(ap->port_no);
797 void __iomem *hc_mmio = mv_hc_base_from_port(
798 mv_host_base(ap->host), hard_port);
799 u32 hc_irq_cause, ipending;
800
801 /* clear EDMA event indicators, if any */
802 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
803
804 /* clear EDMA interrupt indicator, if any */
805 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
806 ipending = (DEV_IRQ << hard_port) |
807 (CRPB_DMA_DONE << hard_port);
808 if (hc_irq_cause & ipending) {
809 writelfl(hc_irq_cause & ~ipending,
810 hc_mmio + HC_IRQ_CAUSE_OFS);
811 }
812
813 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
814
815 /* clear FIS IRQ Cause */
816 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
817
818 mv_set_edma_ptrs(port_mmio, hpriv, pp);
819
820 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
821 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
822 }
823 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
824 }
825
826 /**
827 * __mv_stop_dma - Disable eDMA engine
828 * @ap: ATA channel to manipulate
829 *
830 * Verify the local cache of the eDMA state is accurate with a
831 * WARN_ON.
832 *
833 * LOCKING:
834 * Inherited from caller.
835 */
836 static int __mv_stop_dma(struct ata_port *ap)
837 {
838 void __iomem *port_mmio = mv_ap_base(ap);
839 struct mv_port_priv *pp = ap->private_data;
840 u32 reg;
841 int i, err = 0;
842
843 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
844 /* Disable EDMA if active. The disable bit auto clears.
845 */
846 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
847 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
848 } else {
849 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
850 }
851
852 /* now properly wait for the eDMA to stop */
853 for (i = 1000; i > 0; i--) {
854 reg = readl(port_mmio + EDMA_CMD_OFS);
855 if (!(reg & EDMA_EN))
856 break;
857
858 udelay(100);
859 }
860
861 if (reg & EDMA_EN) {
862 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
863 err = -EIO;
864 }
865
866 return err;
867 }
868
869 static int mv_stop_dma(struct ata_port *ap)
870 {
871 unsigned long flags;
872 int rc;
873
874 spin_lock_irqsave(&ap->host->lock, flags);
875 rc = __mv_stop_dma(ap);
876 spin_unlock_irqrestore(&ap->host->lock, flags);
877
878 return rc;
879 }
880
881 #ifdef ATA_DEBUG
882 static void mv_dump_mem(void __iomem *start, unsigned bytes)
883 {
884 int b, w;
885 for (b = 0; b < bytes; ) {
886 DPRINTK("%p: ", start + b);
887 for (w = 0; b < bytes && w < 4; w++) {
888 printk("%08x ", readl(start + b));
889 b += sizeof(u32);
890 }
891 printk("\n");
892 }
893 }
894 #endif
895
896 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
897 {
898 #ifdef ATA_DEBUG
899 int b, w;
900 u32 dw;
901 for (b = 0; b < bytes; ) {
902 DPRINTK("%02x: ", b);
903 for (w = 0; b < bytes && w < 4; w++) {
904 (void) pci_read_config_dword(pdev, b, &dw);
905 printk("%08x ", dw);
906 b += sizeof(u32);
907 }
908 printk("\n");
909 }
910 #endif
911 }
912 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
913 struct pci_dev *pdev)
914 {
915 #ifdef ATA_DEBUG
916 void __iomem *hc_base = mv_hc_base(mmio_base,
917 port >> MV_PORT_HC_SHIFT);
918 void __iomem *port_base;
919 int start_port, num_ports, p, start_hc, num_hcs, hc;
920
921 if (0 > port) {
922 start_hc = start_port = 0;
923 num_ports = 8; /* should be benign for 4 port devs */
924 num_hcs = 2;
925 } else {
926 start_hc = port >> MV_PORT_HC_SHIFT;
927 start_port = port;
928 num_ports = num_hcs = 1;
929 }
930 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
931 num_ports > 1 ? num_ports - 1 : start_port);
932
933 if (NULL != pdev) {
934 DPRINTK("PCI config space regs:\n");
935 mv_dump_pci_cfg(pdev, 0x68);
936 }
937 DPRINTK("PCI regs:\n");
938 mv_dump_mem(mmio_base+0xc00, 0x3c);
939 mv_dump_mem(mmio_base+0xd00, 0x34);
940 mv_dump_mem(mmio_base+0xf00, 0x4);
941 mv_dump_mem(mmio_base+0x1d00, 0x6c);
942 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
943 hc_base = mv_hc_base(mmio_base, hc);
944 DPRINTK("HC regs (HC %i):\n", hc);
945 mv_dump_mem(hc_base, 0x1c);
946 }
947 for (p = start_port; p < start_port + num_ports; p++) {
948 port_base = mv_port_base(mmio_base, p);
949 DPRINTK("EDMA regs (port %i):\n", p);
950 mv_dump_mem(port_base, 0x54);
951 DPRINTK("SATA regs (port %i):\n", p);
952 mv_dump_mem(port_base+0x300, 0x60);
953 }
954 #endif
955 }
956
957 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
958 {
959 unsigned int ofs;
960
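/* SStatus, SError and SControl occupy consecutive 32-bit registers starting at SATA_STATUS_OFS, in the same order as the SCR_* indices */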
961 switch (sc_reg_in) {
962 case SCR_STATUS:
963 case SCR_CONTROL:
964 case SCR_ERROR:
965 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
966 break;
967 case SCR_ACTIVE:
968 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
969 break;
970 default:
971 ofs = 0xffffffffU;
972 break;
973 }
974 return ofs;
975 }
976
977 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
978 {
979 unsigned int ofs = mv_scr_offset(sc_reg_in);
980
981 if (ofs != 0xffffffffU) {
982 *val = readl(mv_ap_base(ap) + ofs);
983 return 0;
984 } else
985 return -EINVAL;
986 }
987
988 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
989 {
990 unsigned int ofs = mv_scr_offset(sc_reg_in);
991
992 if (ofs != 0xffffffffU) {
993 writelfl(val, mv_ap_base(ap) + ofs);
994 return 0;
995 } else
996 return -EINVAL;
997 }
998
999 static void mv6_dev_config(struct ata_device *adev)
1000 {
1001 /*
1002 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1003 * See mv_qc_prep() for more info.
1004 */
1005 if (adev->flags & ATA_DFLAG_NCQ)
1006 if (adev->max_sectors > ATA_MAX_SECTORS)
1007 adev->max_sectors = ATA_MAX_SECTORS;
1008 }
1009
1010 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1011 void __iomem *port_mmio, int want_ncq)
1012 {
1013 u32 cfg;
1014
1015 /* set up non-NCQ EDMA configuration */
1016 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1017
1018 if (IS_GEN_I(hpriv))
1019 cfg |= (1 << 8); /* enab config burst size mask */
1020
1021 else if (IS_GEN_II(hpriv))
1022 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1023
1024 else if (IS_GEN_IIE(hpriv)) {
1025 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1026 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1027 cfg |= (1 << 18); /* enab early completion */
1028 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1029 }
1030
1031 if (want_ncq) {
1032 cfg |= EDMA_CFG_NCQ;
1033 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1034 } else
1035 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1036
1037 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1038 }
1039
1040 static void mv_port_free_dma_mem(struct ata_port *ap)
1041 {
1042 struct mv_host_priv *hpriv = ap->host->private_data;
1043 struct mv_port_priv *pp = ap->private_data;
1044 int tag;
1045
1046 if (pp->crqb) {
1047 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1048 pp->crqb = NULL;
1049 }
1050 if (pp->crpb) {
1051 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1052 pp->crpb = NULL;
1053 }
1054 /*
1055 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1056 * For later hardware, we have one unique sg_tbl per NCQ tag.
1057 */
1058 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1059 if (pp->sg_tbl[tag]) {
1060 if (tag == 0 || !IS_GEN_I(hpriv))
1061 dma_pool_free(hpriv->sg_tbl_pool,
1062 pp->sg_tbl[tag],
1063 pp->sg_tbl_dma[tag]);
1064 pp->sg_tbl[tag] = NULL;
1065 }
1066 }
1067 }
1068
1069 /**
1070 * mv_port_start - Port specific init/start routine.
1071 * @ap: ATA channel to manipulate
1072 *
1073 * Allocate and point to DMA memory, init port private memory,
1074 * zero indices.
1075 *
1076 * LOCKING:
1077 * Inherited from caller.
1078 */
1079 static int mv_port_start(struct ata_port *ap)
1080 {
1081 struct device *dev = ap->host->dev;
1082 struct mv_host_priv *hpriv = ap->host->private_data;
1083 struct mv_port_priv *pp;
1084 void __iomem *port_mmio = mv_ap_base(ap);
1085 unsigned long flags;
1086 int tag;
1087
1088 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1089 if (!pp)
1090 return -ENOMEM;
1091 ap->private_data = pp;
1092
1093 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1094 if (!pp->crqb)
1095 return -ENOMEM;
1096 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1097
1098 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1099 if (!pp->crpb)
1100 goto out_port_free_dma_mem;
1101 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1102
1103 /*
1104 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1105 * For later hardware, we need one unique sg_tbl per NCQ tag.
1106 */
1107 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1108 if (tag == 0 || !IS_GEN_I(hpriv)) {
1109 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1110 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1111 if (!pp->sg_tbl[tag])
1112 goto out_port_free_dma_mem;
1113 } else {
1114 pp->sg_tbl[tag] = pp->sg_tbl[0];
1115 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1116 }
1117 }
1118
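/* the host lock serializes these EDMA configuration writes with the interrupt handler */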
1119 spin_lock_irqsave(&ap->host->lock, flags);
1120
1121 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1122 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1123
1124 spin_unlock_irqrestore(&ap->host->lock, flags);
1125
1126 /* Don't turn on EDMA here...do it before DMA commands only. Else
1127 * we'll be unable to send non-data, PIO, etc due to restricted access
1128 * to shadow regs.
1129 */
1130 return 0;
1131
1132 out_port_free_dma_mem:
1133 mv_port_free_dma_mem(ap);
1134 return -ENOMEM;
1135 }
1136
1137 /**
1138 * mv_port_stop - Port specific cleanup/stop routine.
1139 * @ap: ATA channel to manipulate
1140 *
1141 * Stop DMA, cleanup port memory.
1142 *
1143 * LOCKING:
1144 * This routine uses the host lock to protect the DMA stop.
1145 */
1146 static void mv_port_stop(struct ata_port *ap)
1147 {
1148 mv_stop_dma(ap);
1149 mv_port_free_dma_mem(ap);
1150 }
1151
1152 /**
1153 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1154 * @qc: queued command whose SG list to source from
1155 *
1156 * Populate the SG list and mark the last entry.
1157 *
1158 * LOCKING:
1159 * Inherited from caller.
1160 */
1161 static void mv_fill_sg(struct ata_queued_cmd *qc)
1162 {
1163 struct mv_port_priv *pp = qc->ap->private_data;
1164 struct scatterlist *sg;
1165 struct mv_sg *mv_sg, *last_sg = NULL;
1166 unsigned int si;
1167
1168 mv_sg = pp->sg_tbl[qc->tag];
1169 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1170 dma_addr_t addr = sg_dma_address(sg);
1171 u32 sg_len = sg_dma_len(sg);
1172
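/* split the segment so no single ePRD entry crosses a 64KB boundary; a full 64KB length (0x10000) is encoded as 0 in flags_size */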
1173 while (sg_len) {
1174 u32 offset = addr & 0xffff;
1175 u32 len = sg_len;
1176
1177 if (offset + sg_len > 0x10000)
1178 len = 0x10000 - offset;
1179
1180 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1181 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1182 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1183
1184 sg_len -= len;
1185 addr += len;
1186
1187 last_sg = mv_sg;
1188 mv_sg++;
1189 }
1190 }
1191
1192 if (likely(last_sg))
1193 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1194 }
1195
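/* pack one 16-bit CRQB command word: data byte, shadow register address, the CS field, and an optional last-command flag */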
1196 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1197 {
1198 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1199 (last ? CRQB_CMD_LAST : 0);
1200 *cmdw = cpu_to_le16(tmp);
1201 }
1202
1203 /**
1204 * mv_qc_prep - Host specific command preparation.
1205 * @qc: queued command to prepare
1206 *
1207 * This routine simply redirects to the general purpose routine
1208 * if command is not DMA. Else, it handles prep of the CRQB
1209 * (command request block), does some sanity checking, and calls
1210 * the SG load routine.
1211 *
1212 * LOCKING:
1213 * Inherited from caller.
1214 */
1215 static void mv_qc_prep(struct ata_queued_cmd *qc)
1216 {
1217 struct ata_port *ap = qc->ap;
1218 struct mv_port_priv *pp = ap->private_data;
1219 __le16 *cw;
1220 struct ata_taskfile *tf;
1221 u16 flags = 0;
1222 unsigned in_index;
1223
1224 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1225 (qc->tf.protocol != ATA_PROT_NCQ))
1226 return;
1227
1228 /* Fill in command request block
1229 */
1230 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1231 flags |= CRQB_FLAG_READ;
1232 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1233 flags |= qc->tag << CRQB_TAG_SHIFT;
1234
1235 /* get current queue index from software */
1236 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1237
1238 pp->crqb[in_index].sg_addr =
1239 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1240 pp->crqb[in_index].sg_addr_hi =
1241 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1242 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1243
1244 cw = &pp->crqb[in_index].ata_cmd[0];
1245 tf = &qc->tf;
1246
1247 /* Sadly, the CRQB cannot accommodate all registers--there are
1248 * only 11 command words...so we must pick and choose required
1249 * registers based on the command. So, we drop feature and
1250 * hob_feature for [RW] DMA commands, but they are needed for
1251 * NCQ. NCQ will drop hob_nsect.
1252 */
1253 switch (tf->command) {
1254 case ATA_CMD_READ:
1255 case ATA_CMD_READ_EXT:
1256 case ATA_CMD_WRITE:
1257 case ATA_CMD_WRITE_EXT:
1258 case ATA_CMD_WRITE_FUA_EXT:
1259 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1260 break;
1261 case ATA_CMD_FPDMA_READ:
1262 case ATA_CMD_FPDMA_WRITE:
1263 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1264 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1265 break;
1266 default:
1267 /* The only other commands EDMA supports in non-queued and
1268 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1269 * of which are defined/used by Linux. If we get here, this
1270 * driver needs work.
1271 *
1272 * FIXME: modify libata to give qc_prep a return value and
1273 * return error here.
1274 */
1275 BUG_ON(tf->command);
1276 break;
1277 }
1278 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1279 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1280 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1281 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1282 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1283 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1284 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1285 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1286 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1287
1288 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1289 return;
1290 mv_fill_sg(qc);
1291 }
1292
1293 /**
1294 * mv_qc_prep_iie - Host specific command preparation.
1295 * @qc: queued command to prepare
1296 *
1297 * This routine simply redirects to the general purpose routine
1298 * if command is not DMA. Else, it handles prep of the CRQB
1299 * (command request block), does some sanity checking, and calls
1300 * the SG load routine.
1301 *
1302 * LOCKING:
1303 * Inherited from caller.
1304 */
1305 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1306 {
1307 struct ata_port *ap = qc->ap;
1308 struct mv_port_priv *pp = ap->private_data;
1309 struct mv_crqb_iie *crqb;
1310 struct ata_taskfile *tf;
1311 unsigned in_index;
1312 u32 flags = 0;
1313
1314 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1315 (qc->tf.protocol != ATA_PROT_NCQ))
1316 return;
1317
1318 /* Fill in Gen IIE command request block
1319 */
1320 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1321 flags |= CRQB_FLAG_READ;
1322
1323 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1324 flags |= qc->tag << CRQB_TAG_SHIFT;
1325 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1326
1327 /* get current queue index from software */
1328 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1329
1330 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1331 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1332 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1333 crqb->flags = cpu_to_le32(flags);
1334
1335 tf = &qc->tf;
1336 crqb->ata_cmd[0] = cpu_to_le32(
1337 (tf->command << 16) |
1338 (tf->feature << 24)
1339 );
1340 crqb->ata_cmd[1] = cpu_to_le32(
1341 (tf->lbal << 0) |
1342 (tf->lbam << 8) |
1343 (tf->lbah << 16) |
1344 (tf->device << 24)
1345 );
1346 crqb->ata_cmd[2] = cpu_to_le32(
1347 (tf->hob_lbal << 0) |
1348 (tf->hob_lbam << 8) |
1349 (tf->hob_lbah << 16) |
1350 (tf->hob_feature << 24)
1351 );
1352 crqb->ata_cmd[3] = cpu_to_le32(
1353 (tf->nsect << 0) |
1354 (tf->hob_nsect << 8)
1355 );
1356
1357 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1358 return;
1359 mv_fill_sg(qc);
1360 }
1361
1362 /**
1363 * mv_qc_issue - Initiate a command to the host
1364 * @qc: queued command to start
1365 *
1366 * This routine simply redirects to the general purpose routine
1367 * if command is not DMA. Else, it sanity checks our local
1368 * caches of the request producer/consumer indices then enables
1369 * DMA and bumps the request producer index.
1370 *
1371 * LOCKING:
1372 * Inherited from caller.
1373 */
1374 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1375 {
1376 struct ata_port *ap = qc->ap;
1377 void __iomem *port_mmio = mv_ap_base(ap);
1378 struct mv_port_priv *pp = ap->private_data;
1379 u32 in_index;
1380
1381 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1382 (qc->tf.protocol != ATA_PROT_NCQ)) {
1383 /* We're about to send a non-EDMA capable command to the
1384 * port. Turn off EDMA so there won't be problems accessing
1385 * shadow block and other registers.
1386 */
1387 __mv_stop_dma(ap);
1388 return ata_qc_issue_prot(qc);
1389 }
1390
1391 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1392
1393 pp->req_idx++;
1394
1395 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1396
1397 /* and write the request in pointer to kick the EDMA to life */
1398 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1399 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1400
1401 return 0;
1402 }
1403
1404 /**
1405 * mv_err_intr - Handle error interrupts on the port
1406 * @ap: ATA channel to manipulate
1407 * @qc: queued command active at the time of the error, or NULL
1408 *
1409 * In most cases, just clear the interrupt and move on. However,
1410 * some cases require an eDMA reset, which is done right before
1411 * the COMRESET in mv_phy_reset(). The SERR case requires a
1412 * clear of pending errors in the SATA SERROR register. Finally,
1413 * if the port disabled DMA, update our cached copy to match.
1414 *
1415 * LOCKING:
1416 * Inherited from caller.
1417 */
1418 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1419 {
1420 void __iomem *port_mmio = mv_ap_base(ap);
1421 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1422 struct mv_port_priv *pp = ap->private_data;
1423 struct mv_host_priv *hpriv = ap->host->private_data;
1424 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1425 unsigned int action = 0, err_mask = 0;
1426 struct ata_eh_info *ehi = &ap->link.eh_info;
1427
1428 ata_ehi_clear_desc(ehi);
1429
1430 if (!edma_enabled) {
1431 /* just a guess: do we need to do this? should we
1432 * expand this, and do it in all cases?
1433 */
1434 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1435 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1436 }
1437
1438 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1439
1440 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1441
1442 /*
1443 * all generations share these EDMA error cause bits
1444 */
1445
1446 if (edma_err_cause & EDMA_ERR_DEV)
1447 err_mask |= AC_ERR_DEV;
1448 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1449 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1450 EDMA_ERR_INTRL_PAR)) {
1451 err_mask |= AC_ERR_ATA_BUS;
1452 action |= ATA_EH_RESET;
1453 ata_ehi_push_desc(ehi, "parity error");
1454 }
1455 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1456 ata_ehi_hotplugged(ehi);
1457 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1458 "dev disconnect" : "dev connect");
1459 action |= ATA_EH_RESET;
1460 }
1461
1462 if (IS_GEN_I(hpriv)) {
1463 eh_freeze_mask = EDMA_EH_FREEZE_5;
1464
1465 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1466 pp = ap->private_data;
1467 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1468 ata_ehi_push_desc(ehi, "EDMA self-disable");
1469 }
1470 } else {
1471 eh_freeze_mask = EDMA_EH_FREEZE;
1472
1473 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1474 pp = ap->private_data;
1475 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1476 ata_ehi_push_desc(ehi, "EDMA self-disable");
1477 }
1478
1479 if (edma_err_cause & EDMA_ERR_SERR) {
1480 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1481 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1482 err_mask = AC_ERR_ATA_BUS;
1483 action |= ATA_EH_RESET;
1484 }
1485 }
1486
1487 /* Clear EDMA now that SERR cleanup done */
1488 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1489
1490 if (!err_mask) {
1491 err_mask = AC_ERR_OTHER;
1492 action |= ATA_EH_RESET;
1493 }
1494
1495 ehi->serror |= serr;
1496 ehi->action |= action;
1497
1498 if (qc)
1499 qc->err_mask |= err_mask;
1500 else
1501 ehi->err_mask |= err_mask;
1502
1503 if (edma_err_cause & eh_freeze_mask)
1504 ata_port_freeze(ap);
1505 else
1506 ata_port_abort(ap);
1507 }
1508
1509 static void mv_intr_pio(struct ata_port *ap)
1510 {
1511 struct ata_queued_cmd *qc;
1512 u8 ata_status;
1513
1514 /* ignore spurious intr if drive still BUSY */
1515 ata_status = readb(ap->ioaddr.status_addr);
1516 if (unlikely(ata_status & ATA_BUSY))
1517 return;
1518
1519 /* get active ATA command */
1520 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1521 if (unlikely(!qc)) /* no active tag */
1522 return;
1523 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1524 return;
1525
1526 /* and finally, complete the ATA command */
1527 qc->err_mask |= ac_err_mask(ata_status);
1528 ata_qc_complete(qc);
1529 }
1530
1531 static void mv_intr_edma(struct ata_port *ap)
1532 {
1533 void __iomem *port_mmio = mv_ap_base(ap);
1534 struct mv_host_priv *hpriv = ap->host->private_data;
1535 struct mv_port_priv *pp = ap->private_data;
1536 struct ata_queued_cmd *qc;
1537 u32 out_index, in_index;
1538 bool work_done = false;
1539
1540 /* get h/w response queue pointer */
1541 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1542 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1543
1544 while (1) {
1545 u16 status;
1546 unsigned int tag;
1547
1548 /* get s/w response queue last-read pointer, and compare */
1549 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1550 if (in_index == out_index)
1551 break;
1552
1553 /* 50xx: get active ATA command */
1554 if (IS_GEN_I(hpriv))
1555 tag = ap->link.active_tag;
1556
1557 /* Gen II/IIE: get active ATA command via tag, to enable
1558 * support for queueing. this works transparently for
1559 * queued and non-queued modes.
1560 */
1561 else
1562 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1563
1564 qc = ata_qc_from_tag(ap, tag);
1565
1566 /* For non-NCQ mode, the lower 8 bits of status
1567 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1568 * which should be zero if all went well.
1569 */
1570 status = le16_to_cpu(pp->crpb[out_index].flags);
1571 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1572 mv_err_intr(ap, qc);
1573 return;
1574 }
1575
1576 /* and finally, complete the ATA command */
1577 if (qc) {
1578 qc->err_mask |=
1579 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1580 ata_qc_complete(qc);
1581 }
1582
1583 /* advance software response queue pointer, to
1584 * indicate (after the loop completes) to hardware
1585 * that we have consumed a response queue entry.
1586 */
1587 work_done = true;
1588 pp->resp_idx++;
1589 }
1590
1591 if (work_done)
1592 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1593 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1594 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1595 }
1596
1597 /**
1598 * mv_host_intr - Handle all interrupts on the given host controller
1599 * @host: host specific structure
1600 * @relevant: port error bits relevant to this host controller
1601 * @hc: which host controller we're to look at
1602 *
1603 * Read then write clear the HC interrupt status then walk each
1604 * port connected to the HC and see if it needs servicing. Port
1605 * success ints are reported in the HC interrupt status reg, the
1606 * port error ints are reported in the higher level main
1607 * interrupt status register and thus are passed in via the
1608 * 'relevant' argument.
1609 *
1610 * LOCKING:
1611 * Inherited from caller.
1612 */
1613 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1614 {
1615 struct mv_host_priv *hpriv = host->private_data;
1616 void __iomem *mmio = hpriv->base;
1617 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1618 u32 hc_irq_cause;
1619 int port, port0, last_port;
1620
1621 if (hc == 0)
1622 port0 = 0;
1623 else
1624 port0 = MV_PORTS_PER_HC;
1625
1626 if (HAS_PCI(host))
1627 last_port = port0 + MV_PORTS_PER_HC;
1628 else
1629 last_port = port0 + hpriv->n_ports;
1630 /* we'll need the HC success int register in most cases */
1631 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1632 if (!hc_irq_cause)
1633 return;
1634
1635 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1636
1637 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1638 hc, relevant, hc_irq_cause);
1639
1640 for (port = port0; port < last_port; port++) {
1641 struct ata_port *ap = host->ports[port];
1642 struct mv_port_priv *pp;
1643 int have_err_bits, hard_port, shift;
1644
1645 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1646 continue;
1647
1648 pp = ap->private_data;
1649
1650 shift = port << 1; /* (port * 2) */
1651 if (port >= MV_PORTS_PER_HC) {
1652 shift++; /* skip bit 8 in the HC Main IRQ reg */
1653 }
1654 have_err_bits = ((PORT0_ERR << shift) & relevant);
1655
1656 if (unlikely(have_err_bits)) {
1657 struct ata_queued_cmd *qc;
1658
1659 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1660 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1661 continue;
1662
1663 mv_err_intr(ap, qc);
1664 continue;
1665 }
1666
1667 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1668
1669 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1670 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1671 mv_intr_edma(ap);
1672 } else {
1673 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1674 mv_intr_pio(ap);
1675 }
1676 }
1677 VPRINTK("EXIT\n");
1678 }
1679
1680 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1681 {
1682 struct mv_host_priv *hpriv = host->private_data;
1683 struct ata_port *ap;
1684 struct ata_queued_cmd *qc;
1685 struct ata_eh_info *ehi;
1686 unsigned int i, err_mask, printed = 0;
1687 u32 err_cause;
1688
1689 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1690
1691 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1692 err_cause);
1693
1694 DPRINTK("All regs @ PCI error\n");
1695 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1696
1697 writelfl(0, mmio + hpriv->irq_cause_ofs);
1698
1699 for (i = 0; i < host->n_ports; i++) {
1700 ap = host->ports[i];
1701 if (!ata_link_offline(&ap->link)) {
1702 ehi = &ap->link.eh_info;
1703 ata_ehi_clear_desc(ehi);
1704 if (!printed++)
1705 ata_ehi_push_desc(ehi,
1706 "PCI err cause 0x%08x", err_cause);
1707 err_mask = AC_ERR_HOST_BUS;
1708 ehi->action = ATA_EH_RESET;
1709 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1710 if (qc)
1711 qc->err_mask |= err_mask;
1712 else
1713 ehi->err_mask |= err_mask;
1714
1715 ata_port_freeze(ap);
1716 }
1717 }
1718 }
1719
1720 /**
1721 * mv_interrupt - Main interrupt event handler
1722 * @irq: unused
1723 * @dev_instance: private data; in this case the host structure
1724 *
1725 * Read the read only register to determine if any host
1726 * controllers have pending interrupts. If so, call lower level
1727 * routine to handle. Also check for PCI errors which are only
1728 * reported here.
1729 *
1730 * LOCKING:
1731 * This routine holds the host lock while processing pending
1732 * interrupts.
1733 */
1734 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1735 {
1736 struct ata_host *host = dev_instance;
1737 struct mv_host_priv *hpriv = host->private_data;
1738 unsigned int hc, handled = 0, n_hcs;
1739 void __iomem *mmio = hpriv->base;
1740 u32 irq_stat, irq_mask;
1741
1742 spin_lock(&host->lock);
1743
1744 irq_stat = readl(hpriv->main_cause_reg_addr);
1745 irq_mask = readl(hpriv->main_mask_reg_addr);
1746
1747 /* check the cases where we either have nothing pending or have read
1748 * a bogus register value which can indicate HW removal or PCI fault
1749 */
1750 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1751 goto out_unlock;
1752
1753 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1754
1755 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1756 mv_pci_error(host, mmio);
1757 handled = 1;
1758 goto out_unlock; /* skip all other HC irq handling */
1759 }
1760
1761 for (hc = 0; hc < n_hcs; hc++) {
1762 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1763 if (relevant) {
1764 mv_host_intr(host, relevant, hc);
1765 handled = 1;
1766 }
1767 }
1768
1769 out_unlock:
1770 spin_unlock(&host->lock);
1771
1772 return IRQ_RETVAL(handled);
1773 }
1774
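/* 50xx: each port's PHY/SCR registers occupy a 0x100-byte slot within its host controller's block, starting at offset 0x100 */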
1775 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1776 {
1777 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1778 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1779
1780 return hc_mmio + ofs;
1781 }
1782
1783 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1784 {
1785 unsigned int ofs;
1786
1787 switch (sc_reg_in) {
1788 case SCR_STATUS:
1789 case SCR_ERROR:
1790 case SCR_CONTROL:
1791 ofs = sc_reg_in * sizeof(u32);
1792 break;
1793 default:
1794 ofs = 0xffffffffU;
1795 break;
1796 }
1797 return ofs;
1798 }
1799
1800 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1801 {
1802 struct mv_host_priv *hpriv = ap->host->private_data;
1803 void __iomem *mmio = hpriv->base;
1804 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1805 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1806
1807 if (ofs != 0xffffffffU) {
1808 *val = readl(addr + ofs);
1809 return 0;
1810 } else
1811 return -EINVAL;
1812 }
1813
1814 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1815 {
1816 struct mv_host_priv *hpriv = ap->host->private_data;
1817 void __iomem *mmio = hpriv->base;
1818 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1819 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1820
1821 if (ofs != 0xffffffffU) {
1822 writelfl(val, addr + ofs);
1823 return 0;
1824 } else
1825 return -EINVAL;
1826 }
1827
1828 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1829 {
1830 struct pci_dev *pdev = to_pci_dev(host->dev);
1831 int early_5080;
1832
1833 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1834
1835 if (!early_5080) {
1836 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1837 tmp |= (1 << 0);
1838 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1839 }
1840
1841 mv_reset_pci_bus(host, mmio);
1842 }
1843
1844 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1845 {
1846 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1847 }
1848
1849 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1850 void __iomem *mmio)
1851 {
1852 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1853 u32 tmp;
1854
1855 tmp = readl(phy_mmio + MV5_PHY_MODE);
1856
1857 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1858 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1859 }
1860
1861 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1862 {
1863 u32 tmp;
1864
1865 writel(0, mmio + MV_GPIO_PORT_CTL);
1866
1867 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1868
1869 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1870 tmp |= ~(1 << 0);
1871 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1872 }
1873
1874 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1875 unsigned int port)
1876 {
1877 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1878 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1879 u32 tmp;
1880 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1881
1882 if (fix_apm_sq) {
1883 tmp = readl(phy_mmio + MV5_LT_MODE);
1884 tmp |= (1 << 19);
1885 writel(tmp, phy_mmio + MV5_LT_MODE);
1886
1887 tmp = readl(phy_mmio + MV5_PHY_CTL);
1888 tmp &= ~0x3;
1889 tmp |= 0x1;
1890 writel(tmp, phy_mmio + MV5_PHY_CTL);
1891 }
1892
1893 tmp = readl(phy_mmio + MV5_PHY_MODE);
1894 tmp &= ~mask;
1895 tmp |= hpriv->signal[port].pre;
1896 tmp |= hpriv->signal[port].amps;
1897 writel(tmp, phy_mmio + MV5_PHY_MODE);
1898 }
1899
1900
1901 #undef ZERO
1902 #define ZERO(reg) writel(0, port_mmio + (reg))
1903 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1904 unsigned int port)
1905 {
1906 void __iomem *port_mmio = mv_port_base(mmio, port);
1907
1908 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1909
1910 mv_channel_reset(hpriv, mmio, port);
1911
1912 ZERO(0x028); /* command */
1913 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1914 ZERO(0x004); /* timer */
1915 ZERO(0x008); /* irq err cause */
1916 ZERO(0x00c); /* irq err mask */
1917 ZERO(0x010); /* rq bah */
1918 ZERO(0x014); /* rq inp */
1919 ZERO(0x018); /* rq outp */
1920 ZERO(0x01c); /* respq bah */
1921 ZERO(0x024); /* respq outp */
1922 ZERO(0x020); /* respq inp */
1923 ZERO(0x02c); /* test control */
1924 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1925 }
1926 #undef ZERO
1927
1928 #define ZERO(reg) writel(0, hc_mmio + (reg))
1929 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1930 unsigned int hc)
1931 {
1932 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1933 u32 tmp;
1934
1935 ZERO(0x00c);
1936 ZERO(0x010);
1937 ZERO(0x014);
1938 ZERO(0x018);
1939
1940 tmp = readl(hc_mmio + 0x20);
1941 tmp &= 0x1c1c1c1c;
1942 tmp |= 0x03030303;
1943 writel(tmp, hc_mmio + 0x20);
1944 }
1945 #undef ZERO
1946
1947 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1948 unsigned int n_hc)
1949 {
1950 unsigned int hc, port;
1951
1952 for (hc = 0; hc < n_hc; hc++) {
1953 for (port = 0; port < MV_PORTS_PER_HC; port++)
1954 mv5_reset_hc_port(hpriv, mmio,
1955 (hc * MV_PORTS_PER_HC) + port);
1956
1957 mv5_reset_one_hc(hpriv, mmio, hc);
1958 }
1959
1960 return 0;
1961 }
1962
1963 #undef ZERO
1964 #define ZERO(reg) writel(0, mmio + (reg))
1965 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
1966 {
1967 struct mv_host_priv *hpriv = host->private_data;
1968 u32 tmp;
1969
1970 tmp = readl(mmio + MV_PCI_MODE);
1971 tmp &= 0xff00ffff;
1972 writel(tmp, mmio + MV_PCI_MODE);
1973
1974 ZERO(MV_PCI_DISC_TIMER);
1975 ZERO(MV_PCI_MSI_TRIGGER);
1976 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1977 ZERO(HC_MAIN_IRQ_MASK_OFS);
1978 ZERO(MV_PCI_SERR_MASK);
1979 ZERO(hpriv->irq_cause_ofs);
1980 ZERO(hpriv->irq_mask_ofs);
1981 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1982 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1983 ZERO(MV_PCI_ERR_ATTRIBUTE);
1984 ZERO(MV_PCI_ERR_COMMAND);
1985 }
1986 #undef ZERO
1987
1988 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1989 {
1990 u32 tmp;
1991
1992 mv5_reset_flash(hpriv, mmio);
1993
1994 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1995 tmp &= 0x3;
1996 tmp |= (1 << 5) | (1 << 6);
1997 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1998 }
1999
2000 /**
2001 * mv6_reset_hc - Perform the 6xxx global soft reset
2002 * @mmio: base address of the HBA
2003 *
2004 * This routine only applies to 6xxx parts.
2005 *
2006 * LOCKING:
2007 * Inherited from caller.
2008 */
2009 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2010 unsigned int n_hc)
2011 {
2012 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2013 int i, rc = 0;
2014 u32 t;
2015
2016 /* Following procedure defined in PCI "main command and status
2017 * register" table.
2018 */
2019 t = readl(reg);
2020 writel(t | STOP_PCI_MASTER, reg);
2021
2022 for (i = 0; i < 1000; i++) {
2023 udelay(1);
2024 t = readl(reg);
2025 if (PCI_MASTER_EMPTY & t)
2026 break;
2027 }
2028 if (!(PCI_MASTER_EMPTY & t)) {
2029 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2030 rc = 1;
2031 goto done;
2032 }
2033
2034 /* set reset */
2035 i = 5;
2036 do {
2037 writel(t | GLOB_SFT_RST, reg);
2038 t = readl(reg);
2039 udelay(1);
2040 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2041
2042 if (!(GLOB_SFT_RST & t)) {
2043 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2044 rc = 1;
2045 goto done;
2046 }
2047
2048 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2049 i = 5;
2050 do {
2051 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2052 t = readl(reg);
2053 udelay(1);
2054 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2055
2056 if (GLOB_SFT_RST & t) {
2057 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2058 rc = 1;
2059 }
2060 done:
2061 return rc;
2062 }
2063
2064 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2065 void __iomem *mmio)
2066 {
2067 void __iomem *port_mmio;
2068 u32 tmp;
2069
2070 tmp = readl(mmio + MV_RESET_CFG);
2071 if ((tmp & (1 << 0)) == 0) {
2072 hpriv->signal[idx].amps = 0x7 << 8;
2073 hpriv->signal[idx].pre = 0x1 << 5;
2074 return;
2075 }
2076
2077 port_mmio = mv_port_base(mmio, idx);
2078 tmp = readl(port_mmio + PHY_MODE2);
2079
2080 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2081 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2082 }
2083
2084 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2085 {
2086 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2087 }
2088
2089 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2090 unsigned int port)
2091 {
2092 void __iomem *port_mmio = mv_port_base(mmio, port);
2093
2094 u32 hp_flags = hpriv->hp_flags;
2095 int fix_phy_mode2 =
2096 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2097 int fix_phy_mode4 =
2098 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2099 u32 m2, tmp;
2100
2101 if (fix_phy_mode2) {
2102 m2 = readl(port_mmio + PHY_MODE2);
2103 m2 &= ~(1 << 16);
2104 m2 |= (1 << 31);
2105 writel(m2, port_mmio + PHY_MODE2);
2106
2107 udelay(200);
2108
2109 m2 = readl(port_mmio + PHY_MODE2);
2110 m2 &= ~((1 << 16) | (1 << 31));
2111 writel(m2, port_mmio + PHY_MODE2);
2112
2113 udelay(200);
2114 }
2115
2116 /* who knows what this magic does */
2117 tmp = readl(port_mmio + PHY_MODE3);
2118 tmp &= ~0x7F800000;
2119 tmp |= 0x2A800000;
2120 writel(tmp, port_mmio + PHY_MODE3);
2121
2122 if (fix_phy_mode4) {
2123 u32 m4;
2124
2125 m4 = readl(port_mmio + PHY_MODE4);
2126
2127 if (hp_flags & MV_HP_ERRATA_60X1B2)
2128 tmp = readl(port_mmio + 0x310);
2129
2130 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2131
2132 writel(m4, port_mmio + PHY_MODE4);
2133
2134 if (hp_flags & MV_HP_ERRATA_60X1B2)
2135 writel(tmp, port_mmio + 0x310);
2136 }
2137
2138 /* Revert values of pre-emphasis and signal amps to the saved ones */
2139 m2 = readl(port_mmio + PHY_MODE2);
2140
2141 m2 &= ~MV_M2_PREAMP_MASK;
2142 m2 |= hpriv->signal[port].amps;
2143 m2 |= hpriv->signal[port].pre;
2144 m2 &= ~(1 << 16);
2145
2146 /* according to mvSata 3.6.1, some IIE values are fixed */
2147 if (IS_GEN_IIE(hpriv)) {
2148 m2 &= ~0xC30FF01F;
2149 m2 |= 0x0000900F;
2150 }
2151
2152 writel(m2, port_mmio + PHY_MODE2);
2153 }
2154
2155 /* TODO: use the generic LED interface to configure the SATA Presence */
2156 /* & Activity LEDs on the board */
2157 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2158 void __iomem *mmio)
2159 {
2160 return;
2161 }
2162
2163 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2164 void __iomem *mmio)
2165 {
2166 void __iomem *port_mmio;
2167 u32 tmp;
2168
2169 port_mmio = mv_port_base(mmio, idx);
2170 tmp = readl(port_mmio + PHY_MODE2);
2171
2172 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2173 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2174 }
2175
2176 #undef ZERO
2177 #define ZERO(reg) writel(0, port_mmio + (reg))
2178 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2179 void __iomem *mmio, unsigned int port)
2180 {
2181 void __iomem *port_mmio = mv_port_base(mmio, port);
2182
2183 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2184
2185 mv_channel_reset(hpriv, mmio, port);
2186
2187 ZERO(0x028); /* command */
2188 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2189 ZERO(0x004); /* timer */
2190 ZERO(0x008); /* irq err cause */
2191 ZERO(0x00c); /* irq err mask */
2192 ZERO(0x010); /* rq bah */
2193 ZERO(0x014); /* rq inp */
2194 ZERO(0x018); /* rq outp */
2195 ZERO(0x01c); /* respq bah */
2196 ZERO(0x024); /* respq outp */
2197 ZERO(0x020); /* respq inp */
2198 ZERO(0x02c); /* test control */
2199 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2200 }
2201
2202 #undef ZERO
2203
2204 #define ZERO(reg) writel(0, hc_mmio + (reg))
2205 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2206 void __iomem *mmio)
2207 {
2208 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2209
2210 ZERO(0x00c);
2211 ZERO(0x010);
2212 ZERO(0x014);
2213
2214 }
2215
2216 #undef ZERO
2217
2218 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2219 void __iomem *mmio, unsigned int n_hc)
2220 {
2221 unsigned int port;
2222
2223 for (port = 0; port < hpriv->n_ports; port++)
2224 mv_soc_reset_hc_port(hpriv, mmio, port);
2225
2226 mv_soc_reset_one_hc(hpriv, mmio);
2227
2228 return 0;
2229 }
2230
2231 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2232 void __iomem *mmio)
2233 {
2234 return;
2235 }
2236
2237 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2238 {
2239 return;
2240 }
2241
2242 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2243 unsigned int port_no)
2244 {
2245 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2246
2247 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2248
2249 if (IS_GEN_II(hpriv)) {
2250 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2251 ifctl |= (1 << 7); /* enable gen2i speed */
2252 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2253 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2254 }
2255
2256 udelay(25); /* allow reset propagation */
2257
2258 /* Spec never mentions clearing the bit. Marvell's driver does
2259 * clear the bit, however.
2260 */
2261 writelfl(0, port_mmio + EDMA_CMD_OFS);
2262
2263 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2264
2265 if (IS_GEN_I(hpriv))
2266 mdelay(1);
2267 }
2268
2269 /**
2270 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2271 * @ap: ATA channel to manipulate
2272 *
2273 * Part of this is taken from __sata_phy_reset and modified to
2274 * not sleep since this routine gets called from interrupt level.
2275 *
2276 * LOCKING:
2277 * Inherited from caller. This is coded to be safe to call at
2278 * interrupt level, i.e. it does not sleep.
2279 */
2280 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2281 unsigned long deadline)
2282 {
2283 struct mv_port_priv *pp = ap->private_data;
2284 struct mv_host_priv *hpriv = ap->host->private_data;
2285 void __iomem *port_mmio = mv_ap_base(ap);
2286 int retry = 5;
2287 u32 sstatus;
2288
2289 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2290
2291 #ifdef DEBUG
2292 {
2293 u32 sstatus, serror, scontrol;
2294
2295 mv_scr_read(ap, SCR_STATUS, &sstatus);
2296 mv_scr_read(ap, SCR_ERROR, &serror);
2297 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2298 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2299 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2300 }
2301 #endif
2302
2303 /* Issue COMRESET via SControl */
2304 comreset_retry:
2305 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2306 msleep(1);
2307
2308 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2309 msleep(20);
2310
2311 do {
2312 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2313 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2314 break;
2315
2316 msleep(1);
2317 } while (time_before(jiffies, deadline));
2318
2319 /* work around errata */
2320 if (IS_GEN_II(hpriv) &&
2321 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2322 (retry-- > 0))
2323 goto comreset_retry;
2324
2325 #ifdef DEBUG
2326 {
2327 u32 sstatus, serror, scontrol;
2328
2329 mv_scr_read(ap, SCR_STATUS, &sstatus);
2330 mv_scr_read(ap, SCR_ERROR, &serror);
2331 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2332 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2333 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2334 }
2335 #endif
2336
2337 if (ata_link_offline(&ap->link)) {
2338 *class = ATA_DEV_NONE;
2339 return;
2340 }
2341
2342 /* even after SStatus reflects that device is ready,
2343 * it seems to take a while for link to be fully
2344 * established (and thus Status no longer 0x80/0x7F),
2345 * so we poll a bit for that, here.
2346 */
2347 retry = 20;
2348 while (1) {
2349 u8 drv_stat = ata_check_status(ap);
2350 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2351 break;
2352 msleep(500);
2353 if (retry-- <= 0)
2354 break;
2355 if (time_after(jiffies, deadline))
2356 break;
2357 }
2358
2359 /* FIXME: if we passed the deadline, the following
2360 * code probably produces an invalid result
2361 */
2362
2363 /* finally, read device signature from TF registers */
2364 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2365
2366 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2367
2368 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2369
2370 VPRINTK("EXIT\n");
2371 }
2372
2373 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2374 {
2375 struct ata_port *ap = link->ap;
2376 struct mv_port_priv *pp = ap->private_data;
2377
2378 mv_stop_dma(ap);
2379
2380 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
2381 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2382
2383 return 0;
2384 }
2385
2386 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2387 unsigned long deadline)
2388 {
2389 struct ata_port *ap = link->ap;
2390 struct mv_host_priv *hpriv = ap->host->private_data;
2391 void __iomem *mmio = hpriv->base;
2392
2393 mv_stop_dma(ap);
2394
2395 mv_channel_reset(hpriv, mmio, ap->port_no);
2396
2397 mv_phy_reset(ap, class, deadline);
2398
2399 return 0;
2400 }
2401
2402 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2403 {
2404 struct ata_port *ap = link->ap;
2405 u32 serr;
2406
2407 /* print link status */
2408 sata_print_link_status(link);
2409
2410 /* clear SError */
2411 sata_scr_read(link, SCR_ERROR, &serr);
2412 sata_scr_write_flush(link, SCR_ERROR, serr);
2413
2414 /* bail out if no device is present */
2415 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2416 DPRINTK("EXIT, no device\n");
2417 return;
2418 }
2419
2420 /* set up device control */
2421 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2422 }
2423
2424 static void mv_eh_freeze(struct ata_port *ap)
2425 {
2426 struct mv_host_priv *hpriv = ap->host->private_data;
2427 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2428 u32 tmp, mask;
2429 unsigned int shift;
2430
2431 /* FIXME: handle coalescing completion events properly */
2432
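/* Each port owns two bits (err, done) in the main IRQ mask; ports on
 * the second host controller sit one extra bit higher, apparently to
 * skip past HC0's coalescing-done bit (compare HC0_IRQ_PEND/HC_SHIFT).
 */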
2433 shift = ap->port_no * 2;
2434 if (hc > 0)
2435 shift++;
2436
2437 mask = 0x3 << shift;
2438
2439 /* disable assertion of portN err, done events */
2440 tmp = readl(hpriv->main_mask_reg_addr);
2441 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2442 }
2443
2444 static void mv_eh_thaw(struct ata_port *ap)
2445 {
2446 struct mv_host_priv *hpriv = ap->host->private_data;
2447 void __iomem *mmio = hpriv->base;
2448 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2449 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2450 void __iomem *port_mmio = mv_ap_base(ap);
2451 u32 tmp, mask, hc_irq_cause;
2452 unsigned int shift, hc_port_no = ap->port_no;
2453
2454 /* FIXME: handle coalescing completion events properly */
2455
2456 shift = ap->port_no * 2;
2457 if (hc > 0) {
2458 shift++;
2459 hc_port_no -= 4;
2460 }
2461
2462 mask = 0x3 << shift;
2463
2464 /* clear EDMA errors on this port */
2465 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2466
2467 /* clear pending irq events */
2468 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2469 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2470 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2471 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2472
2473 /* enable assertion of portN err, done events */
2474 tmp = readl(hpriv->main_mask_reg_addr);
2475 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2476 }
2477
2478 /**
2479 * mv_port_init - Perform some early initialization on a single port.
2480 * @port: libata data structure storing shadow register addresses
2481 * @port_mmio: base address of the port
2482 *
2483 * Initialize shadow register mmio addresses, clear outstanding
2484 * interrupts on the port, and unmask interrupts for the future
2485 * start of the port.
2486 *
2487 * LOCKING:
2488 * Inherited from caller.
2489 */
2490 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2491 {
2492 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2493 unsigned serr_ofs;
2494
2495 /* PIO related setup
2496 */
2497 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2498 port->error_addr =
2499 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2500 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2501 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2502 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2503 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2504 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2505 port->status_addr =
2506 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2507 /* special case: control/altstatus doesn't have ATA_REG_ address */
2508 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2509
2510 /* unused: */
2511 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2512
2513 /* Clear any currently outstanding port interrupt conditions */
2514 serr_ofs = mv_scr_offset(SCR_ERROR);
2515 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2516 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2517
2518 /* unmask all non-transient EDMA error interrupts */
2519 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2520
2521 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2522 readl(port_mmio + EDMA_CFG_OFS),
2523 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2524 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2525 }
2526
2527 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2528 {
2529 struct pci_dev *pdev = to_pci_dev(host->dev);
2530 struct mv_host_priv *hpriv = host->private_data;
2531 u32 hp_flags = hpriv->hp_flags;
2532
2533 switch (board_idx) {
2534 case chip_5080:
2535 hpriv->ops = &mv5xxx_ops;
2536 hp_flags |= MV_HP_GEN_I;
2537
2538 switch (pdev->revision) {
2539 case 0x1:
2540 hp_flags |= MV_HP_ERRATA_50XXB0;
2541 break;
2542 case 0x3:
2543 hp_flags |= MV_HP_ERRATA_50XXB2;
2544 break;
2545 default:
2546 dev_printk(KERN_WARNING, &pdev->dev,
2547 "Applying 50XXB2 workarounds to unknown rev\n");
2548 hp_flags |= MV_HP_ERRATA_50XXB2;
2549 break;
2550 }
2551 break;
2552
2553 case chip_504x:
2554 case chip_508x:
2555 hpriv->ops = &mv5xxx_ops;
2556 hp_flags |= MV_HP_GEN_I;
2557
2558 switch (pdev->revision) {
2559 case 0x0:
2560 hp_flags |= MV_HP_ERRATA_50XXB0;
2561 break;
2562 case 0x3:
2563 hp_flags |= MV_HP_ERRATA_50XXB2;
2564 break;
2565 default:
2566 dev_printk(KERN_WARNING, &pdev->dev,
2567 "Applying B2 workarounds to unknown rev\n");
2568 hp_flags |= MV_HP_ERRATA_50XXB2;
2569 break;
2570 }
2571 break;
2572
2573 case chip_604x:
2574 case chip_608x:
2575 hpriv->ops = &mv6xxx_ops;
2576 hp_flags |= MV_HP_GEN_II;
2577
2578 switch (pdev->revision) {
2579 case 0x7:
2580 hp_flags |= MV_HP_ERRATA_60X1B2;
2581 break;
2582 case 0x9:
2583 hp_flags |= MV_HP_ERRATA_60X1C0;
2584 break;
2585 default:
2586 dev_printk(KERN_WARNING, &pdev->dev,
2587 "Applying B2 workarounds to unknown rev\n");
2588 hp_flags |= MV_HP_ERRATA_60X1B2;
2589 break;
2590 }
2591 break;
2592
2593 case chip_7042:
2594 hp_flags |= MV_HP_PCIE;
2595 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2596 (pdev->device == 0x2300 || pdev->device == 0x2310))
2597 {
2598 /*
2599 * Highpoint RocketRAID PCIe 23xx series cards:
2600 *
2601 * Unconfigured drives are treated as "Legacy"
2602 * by the BIOS, and it overwrites sector 8 with
2603 * a "Lgcy" metadata block prior to Linux boot.
2604 *
2605 * Configured drives (RAID or JBOD) leave sector 8
2606 * alone, but instead overwrite a high numbered
2607 * sector for the RAID metadata. This sector can
2608 * be determined exactly, by truncating the physical
2609 * drive capacity to a nice even GB value.
2610 *
2611 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2612 *
2613 * Warn the user, lest they think we're just buggy.
2614 */
2615 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2616 " BIOS CORRUPTS DATA on all attached drives,"
2617 " regardless of if/how they are configured."
2618 " BEWARE!\n");
2619 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2620 " use sectors 8-9 on \"Legacy\" drives,"
2621 " and avoid the final two gigabytes on"
2622 " all RocketRAID BIOS initialized drives.\n");
2623 }
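/* fall through - the 7042 is the PCIe flavour of the 6042 and
 * shares the Gen IIE setup below */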
2624 case chip_6042:
2625 hpriv->ops = &mv6xxx_ops;
2626 hp_flags |= MV_HP_GEN_IIE;
2627
2628 switch (pdev->revision) {
2629 case 0x0:
2630 hp_flags |= MV_HP_ERRATA_XX42A0;
2631 break;
2632 case 0x1:
2633 hp_flags |= MV_HP_ERRATA_60X1C0;
2634 break;
2635 default:
2636 dev_printk(KERN_WARNING, &pdev->dev,
2637 "Applying 60X1C0 workarounds to unknown rev\n");
2638 hp_flags |= MV_HP_ERRATA_60X1C0;
2639 break;
2640 }
2641 break;
2642 case chip_soc:
2643 hpriv->ops = &mv_soc_ops;
2644 hp_flags |= MV_HP_ERRATA_60X1C0;
2645 break;
2646
2647 default:
2648 dev_printk(KERN_ERR, host->dev,
2649 "BUG: invalid board index %u\n", board_idx);
2650 return 1;
2651 }
2652
2653 hpriv->hp_flags = hp_flags;
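/* The PCIe parts (7042) report PCI interface errors through a
 * different cause/mask register pair than the parallel-PCI chips. */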
2654 if (hp_flags & MV_HP_PCIE) {
2655 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2656 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2657 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2658 } else {
2659 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2660 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2661 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2662 }
2663
2664 return 0;
2665 }
2666
2667 /**
2668 * mv_init_host - Perform some early initialization of the host.
2669 * @host: ATA host to initialize
2670 * @board_idx: controller index
2671 *
2672 * If possible, do an early global reset of the host. Then do
2673 * our port init and clear/unmask all/relevant host interrupts.
2674 *
2675 * LOCKING:
2676 * Inherited from caller.
2677 */
2678 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2679 {
2680 int rc = 0, n_hc, port, hc;
2681 struct mv_host_priv *hpriv = host->private_data;
2682 void __iomem *mmio = hpriv->base;
2683
2684 rc = mv_chip_id(host, board_idx);
2685 if (rc)
2686 goto done;
2687
2688 if (HAS_PCI(host)) {
2689 hpriv->main_cause_reg_addr = hpriv->base +
2690 HC_MAIN_IRQ_CAUSE_OFS;
2691 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2692 } else {
2693 hpriv->main_cause_reg_addr = hpriv->base +
2694 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2695 hpriv->main_mask_reg_addr = hpriv->base +
2696 HC_SOC_MAIN_IRQ_MASK_OFS;
2697 }
2698 /* global interrupt mask: keep everything masked until init is complete */
2699 writel(0, hpriv->main_mask_reg_addr);
2700
2701 n_hc = mv_get_hc_count(host->ports[0]->flags);
2702
2703 for (port = 0; port < host->n_ports; port++)
2704 hpriv->ops->read_preamp(hpriv, port, mmio);
2705
2706 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2707 if (rc)
2708 goto done;
2709
2710 hpriv->ops->reset_flash(hpriv, mmio);
2711 hpriv->ops->reset_bus(host, mmio);
2712 hpriv->ops->enable_leds(hpriv, mmio);
2713
2714 for (port = 0; port < host->n_ports; port++) {
2715 if (IS_GEN_II(hpriv)) {
2716 void __iomem *port_mmio = mv_port_base(mmio, port);
2717
2718 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2719 ifctl |= (1 << 7); /* enable gen2i speed */
2720 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2721 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2722 }
2723
2724 hpriv->ops->phy_errata(hpriv, mmio, port);
2725 }
2726
2727 for (port = 0; port < host->n_ports; port++) {
2728 struct ata_port *ap = host->ports[port];
2729 void __iomem *port_mmio = mv_port_base(mmio, port);
2730
2731 mv_port_init(&ap->ioaddr, port_mmio);
2732
2733 #ifdef CONFIG_PCI
2734 if (HAS_PCI(host)) {
2735 unsigned int offset = port_mmio - mmio;
2736 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2737 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2738 }
2739 #endif
2740 }
2741
2742 for (hc = 0; hc < n_hc; hc++) {
2743 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2744
2745 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2746 "(before clear)=0x%08x\n", hc,
2747 readl(hc_mmio + HC_CFG_OFS),
2748 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2749
2750 /* Clear any currently outstanding hc interrupt conditions */
2751 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2752 }
2753
2754 if (HAS_PCI(host)) {
2755 /* Clear any currently outstanding host interrupt conditions */
2756 writelfl(0, mmio + hpriv->irq_cause_ofs);
2757
2758 /* and unmask interrupt generation for host regs */
2759 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2760 if (IS_GEN_I(hpriv))
2761 writelfl(~HC_MAIN_MASKED_IRQS_5,
2762 hpriv->main_mask_reg_addr);
2763 else
2764 writelfl(~HC_MAIN_MASKED_IRQS,
2765 hpriv->main_mask_reg_addr);
2766
2767 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2768 "PCI int cause/mask=0x%08x/0x%08x\n",
2769 readl(hpriv->main_cause_reg_addr),
2770 readl(hpriv->main_mask_reg_addr),
2771 readl(mmio + hpriv->irq_cause_ofs),
2772 readl(mmio + hpriv->irq_mask_ofs));
2773 } else {
2774 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2775 hpriv->main_mask_reg_addr);
2776 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2777 readl(hpriv->main_cause_reg_addr),
2778 readl(hpriv->main_mask_reg_addr));
2779 }
2780 done:
2781 return rc;
2782 }
2783
2784 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2785 {
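/* These are managed (dmam_) pools, so they are torn down automatically
 * when the device goes away; no explicit cleanup is needed here. */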
2786 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2787 MV_CRQB_Q_SZ, 0);
2788 if (!hpriv->crqb_pool)
2789 return -ENOMEM;
2790
2791 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2792 MV_CRPB_Q_SZ, 0);
2793 if (!hpriv->crpb_pool)
2794 return -ENOMEM;
2795
2796 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2797 MV_SG_TBL_SZ, 0);
2798 if (!hpriv->sg_tbl_pool)
2799 return -ENOMEM;
2800
2801 return 0;
2802 }
2803
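/*
 * The SoC/platform path gets its port count from platform data.  A
 * board file would wire this up roughly as below (illustrative only;
 * the board-side names are made up, but struct mv_sata_platform_data
 * and its n_ports field are what mv_platform_probe() consumes):
 *
 *	static struct mv_sata_platform_data board_sata_data = {
 *		.n_ports = 2,
 *	};
 *
 * registered against a platform device named "sata_mv" together with
 * one memory resource and one IRQ resource.
 */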
2804 /**
2805 * mv_platform_probe - handle a positive probe of an SoC Marvell
2806 * host
2807 * @pdev: platform device found
2808 *
2809 * LOCKING:
2810 * Inherited from caller.
2811 */
2812 static int mv_platform_probe(struct platform_device *pdev)
2813 {
2814 static int printed_version;
2815 const struct mv_sata_platform_data *mv_platform_data;
2816 const struct ata_port_info *ppi[] =
2817 { &mv_port_info[chip_soc], NULL };
2818 struct ata_host *host;
2819 struct mv_host_priv *hpriv;
2820 struct resource *res;
2821 int n_ports, rc;
2822
2823 if (!printed_version++)
2824 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2825
2826 /*
2827 * Simple resource validation: we expect one MMIO region and one IRQ.
2828 */
2829 if (unlikely(pdev->num_resources != 2)) {
2830 dev_err(&pdev->dev, "invalid number of resources\n");
2831 return -EINVAL;
2832 }
2833
2834 /*
2835 * Get the register base first
2836 */
2837 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2838 if (res == NULL)
2839 return -EINVAL;
2840
2841 /* allocate host */
2842 mv_platform_data = pdev->dev.platform_data;
2843 n_ports = mv_platform_data->n_ports;
2844
2845 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2846 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2847
2848 if (!host || !hpriv)
2849 return -ENOMEM;
2850 host->private_data = hpriv;
2851 hpriv->n_ports = n_ports;
2852
2853 host->iomap = NULL;
2854 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2855 res->end - res->start + 1);
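/* The platform resource points at the SATAHC register block, while the
 * register offsets used throughout this driver are relative to the
 * start of the chip's register window, so back the base up accordingly.
 */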
2856 hpriv->base -= MV_SATAHC0_REG_BASE;
2857
2858 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2859 if (rc)
2860 return rc;
2861
2862 /* initialize adapter */
2863 rc = mv_init_host(host, chip_soc);
2864 if (rc)
2865 return rc;
2866
2867 dev_printk(KERN_INFO, &pdev->dev,
2868 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2869 host->n_ports);
2870
2871 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2872 IRQF_SHARED, &mv6_sht);
2873 }
2874
2875 /**
2877 * mv_platform_remove - unplug a platform interface
2878 * @pdev: platform device
2879 *
2880 * A platform bus SATA device has been unplugged. Perform the needed
2881 * cleanup. Also called on module unload for any active devices.
2882 */
2883 static int __devexit mv_platform_remove(struct platform_device *pdev)
2884 {
2885 struct device *dev = &pdev->dev;
2886 struct ata_host *host = dev_get_drvdata(dev);
2887
2888 ata_host_detach(host);
2889 return 0;
2890 }
2891
2892 static struct platform_driver mv_platform_driver = {
2893 .probe = mv_platform_probe,
2894 .remove = __devexit_p(mv_platform_remove),
2895 .driver = {
2896 .name = DRV_NAME,
2897 .owner = THIS_MODULE,
2898 },
2899 };
2900
2901
2902 #ifdef CONFIG_PCI
2903 static int mv_pci_init_one(struct pci_dev *pdev,
2904 const struct pci_device_id *ent);
2905
2906
2907 static struct pci_driver mv_pci_driver = {
2908 .name = DRV_NAME,
2909 .id_table = mv_pci_tbl,
2910 .probe = mv_pci_init_one,
2911 .remove = ata_pci_remove_one,
2912 };
2913
2914 /*
2915 * module options
2916 */
2917 static int msi; /* Use PCI MSI; either zero (off, default) or non-zero */
2918
2919
2920 /* move to PCI layer or libata core? */
2921 static int pci_go_64(struct pci_dev *pdev)
2922 {
2923 int rc;
2924
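/* Prefer full 64-bit DMA.  If the consistent (coherent) mask cannot be
 * set at 64 bits, fall back to a 32-bit consistent mask; if 64-bit
 * addressing is unavailable altogether, use 32-bit masks for both.
 */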
2925 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2926 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2927 if (rc) {
2928 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2929 if (rc) {
2930 dev_printk(KERN_ERR, &pdev->dev,
2931 "64-bit DMA enable failed\n");
2932 return rc;
2933 }
2934 }
2935 } else {
2936 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2937 if (rc) {
2938 dev_printk(KERN_ERR, &pdev->dev,
2939 "32-bit DMA enable failed\n");
2940 return rc;
2941 }
2942 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2943 if (rc) {
2944 dev_printk(KERN_ERR, &pdev->dev,
2945 "32-bit consistent DMA enable failed\n");
2946 return rc;
2947 }
2948 }
2949
2950 return rc;
2951 }
2952
2953 /**
2954 * mv_print_info - Dump key info to kernel log for perusal.
2955 * @host: ATA host to print info about
2956 *
2957 * FIXME: complete this.
2958 *
2959 * LOCKING:
2960 * Inherited from caller.
2961 */
2962 static void mv_print_info(struct ata_host *host)
2963 {
2964 struct pci_dev *pdev = to_pci_dev(host->dev);
2965 struct mv_host_priv *hpriv = host->private_data;
2966 u8 scc;
2967 const char *scc_s, *gen;
2968
2969 /* Read the PCI class byte so we can report whether this is the
2970 * SCSI or the RAID flavour of the chip, alongside the generation
2971 * worked out earlier in mv_chip_id(). */
2972 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2973 if (scc == 0)
2974 scc_s = "SCSI";
2975 else if (scc == 0x01)
2976 scc_s = "RAID";
2977 else
2978 scc_s = "?";
2979
2980 if (IS_GEN_I(hpriv))
2981 gen = "I";
2982 else if (IS_GEN_II(hpriv))
2983 gen = "II";
2984 else if (IS_GEN_IIE(hpriv))
2985 gen = "IIE";
2986 else
2987 gen = "?";
2988
2989 dev_printk(KERN_INFO, &pdev->dev,
2990 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2991 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2992 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2993 }
2994
2995 /**
2996 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2997 * @pdev: PCI device found
2998 * @ent: PCI device ID entry for the matched host
2999 *
3000 * LOCKING:
3001 * Inherited from caller.
3002 */
3003 static int mv_pci_init_one(struct pci_dev *pdev,
3004 const struct pci_device_id *ent)
3005 {
3006 static int printed_version;
3007 unsigned int board_idx = (unsigned int)ent->driver_data;
3008 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3009 struct ata_host *host;
3010 struct mv_host_priv *hpriv;
3011 int n_ports, rc;
3012
3013 if (!printed_version++)
3014 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3015
3016 /* allocate host */
3017 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3018
3019 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3020 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3021 if (!host || !hpriv)
3022 return -ENOMEM;
3023 host->private_data = hpriv;
3024 hpriv->n_ports = n_ports;
3025
3026 /* acquire resources */
3027 rc = pcim_enable_device(pdev);
3028 if (rc)
3029 return rc;
3030
3031 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3032 if (rc == -EBUSY)
3033 pcim_pin_device(pdev);
3034 if (rc)
3035 return rc;
3036 host->iomap = pcim_iomap_table(pdev);
3037 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3038
3039 rc = pci_go_64(pdev);
3040 if (rc)
3041 return rc;
3042
3043 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3044 if (rc)
3045 return rc;
3046
3047 /* initialize adapter */
3048 rc = mv_init_host(host, board_idx);
3049 if (rc)
3050 return rc;
3051
3052 /* Enable interrupts: try MSI if requested, fall back to INTx if it fails */
3053 if (msi && pci_enable_msi(pdev))
3054 pci_intx(pdev, 1);
3055
3056 mv_dump_pci_cfg(pdev, 0x68);
3057 mv_print_info(host);
3058
3059 pci_set_master(pdev);
3060 pci_try_set_mwi(pdev);
3061 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3062 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3063 }
3064 #endif
3065
3066 static int mv_platform_probe(struct platform_device *pdev);
3067 static int __devexit mv_platform_remove(struct platform_device *pdev);
3068
3069 static int __init mv_init(void)
3070 {
3071 int rc = -ENODEV;
3072 #ifdef CONFIG_PCI
3073 rc = pci_register_driver(&mv_pci_driver);
3074 if (rc < 0)
3075 return rc;
3076 #endif
3077 rc = platform_driver_register(&mv_platform_driver);
3078
3079 #ifdef CONFIG_PCI
3080 if (rc < 0)
3081 pci_unregister_driver(&mv_pci_driver);
3082 #endif
3083 return rc;
3084 }
3085
3086 static void __exit mv_exit(void)
3087 {
3088 #ifdef CONFIG_PCI
3089 pci_unregister_driver(&mv_pci_driver);
3090 #endif
3091 platform_driver_unregister(&mv_platform_driver);
3092 }
3093
3094 MODULE_AUTHOR("Brett Russ");
3095 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3096 MODULE_LICENSE("GPL");
3097 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3098 MODULE_VERSION(DRV_VERSION);
3099 MODULE_ALIAS("platform:sata_mv");
3100
3101 #ifdef CONFIG_PCI
3102 module_param(msi, int, 0444);
3103 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3104 #endif
3105
3106 module_init(mv_init);
3107 module_exit(mv_exit);