2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
32 2) Improve/fix IRQ and error handling sequences.
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42 6) Add port multiplier support (intermediate)
44 8) Develop a low-power-consumption strategy, and implement it.
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not
53 worth the latency cost.
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR
= 0, /* offset 0x10: memory space */
88 MV_IO_BAR
= 2, /* offset 0x18: IO space */
89 MV_MISC_BAR
= 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
91 MV_MAJOR_REG_AREA_SZ
= 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ
= 0x2000, /* 8KB */
95 MV_IRQ_COAL_REG_BASE
= 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE
= (MV_IRQ_COAL_REG_BASE
+ 0x08),
97 MV_IRQ_COAL_CAUSE_LO
= (MV_IRQ_COAL_REG_BASE
+ 0x88),
98 MV_IRQ_COAL_CAUSE_HI
= (MV_IRQ_COAL_REG_BASE
+ 0x8c),
99 MV_IRQ_COAL_THRESHOLD
= (MV_IRQ_COAL_REG_BASE
+ 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD
= (MV_IRQ_COAL_REG_BASE
+ 0xd0),
102 MV_SATAHC0_REG_BASE
= 0x20000,
103 MV_FLASH_CTL
= 0x1046c,
104 MV_GPIO_PORT_CTL
= 0x104f0,
105 MV_RESET_CFG
= 0x180d8,
107 MV_PCI_REG_SZ
= MV_MAJOR_REG_AREA_SZ
,
108 MV_SATAHC_REG_SZ
= MV_MAJOR_REG_AREA_SZ
,
109 MV_SATAHC_ARBTR_REG_SZ
= MV_MINOR_REG_AREA_SZ
, /* arbiter */
110 MV_PORT_REG_SZ
= MV_MINOR_REG_AREA_SZ
,
113 MV_MAX_Q_DEPTH_MASK
= MV_MAX_Q_DEPTH
- 1,
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
119 MV_CRQB_Q_SZ
= (32 * MV_MAX_Q_DEPTH
),
120 MV_CRPB_Q_SZ
= (8 * MV_MAX_Q_DEPTH
),
122 MV_SG_TBL_SZ
= (16 * MV_MAX_SG_CT
),
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT
= 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
131 MV_FLAG_DUAL_HC
= (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE
= (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC
= (1 << 28),
136 MV_COMMON_FLAGS
= ATA_FLAG_SATA
| ATA_FLAG_NO_LEGACY
|
137 ATA_FLAG_MMIO
| ATA_FLAG_NO_ATAPI
|
138 ATA_FLAG_PIO_POLLING
,
139 MV_6XXX_FLAGS
= MV_FLAG_IRQ_COALESCE
,
141 CRQB_FLAG_READ
= (1 << 0),
143 CRQB_IOID_SHIFT
= 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT
= 17, /* CRQB Gen-II/IIE HostQueTag shift */
145 CRQB_CMD_ADDR_SHIFT
= 8,
146 CRQB_CMD_CS
= (0x2 << 11),
147 CRQB_CMD_LAST
= (1 << 15),
149 CRPB_FLAG_STATUS_SHIFT
= 8,
150 CRPB_IOID_SHIFT_6
= 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7
= 7, /* CRPB Gen-IIE IO Id shift */
153 EPRD_FLAG_END_OF_TBL
= (1 << 31),
155 /* PCI interface registers */
157 PCI_COMMAND_OFS
= 0xc00,
159 PCI_MAIN_CMD_STS_OFS
= 0xd30,
160 STOP_PCI_MASTER
= (1 << 2),
161 PCI_MASTER_EMPTY
= (1 << 3),
162 GLOB_SFT_RST
= (1 << 4),
165 MV_PCI_EXP_ROM_BAR_CTL
= 0xd2c,
166 MV_PCI_DISC_TIMER
= 0xd04,
167 MV_PCI_MSI_TRIGGER
= 0xc38,
168 MV_PCI_SERR_MASK
= 0xc28,
169 MV_PCI_XBAR_TMOUT
= 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS
= 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS
= 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE
= 0x1d48,
173 MV_PCI_ERR_COMMAND
= 0x1d50,
175 PCI_IRQ_CAUSE_OFS
= 0x1d58,
176 PCI_IRQ_MASK_OFS
= 0x1d5c,
177 PCI_UNMASK_ALL_IRQS
= 0x7fffff, /* bits 22-0 */
179 PCIE_IRQ_CAUSE_OFS
= 0x1900,
180 PCIE_IRQ_MASK_OFS
= 0x1910,
181 PCIE_UNMASK_ALL_IRQS
= 0x40a, /* assorted bits */
183 HC_MAIN_IRQ_CAUSE_OFS
= 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS
= 0x1d64,
185 HC_SOC_MAIN_IRQ_CAUSE_OFS
= 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS
= 0x20024,
187 PORT0_ERR
= (1 << 0), /* shift by port # */
188 PORT0_DONE
= (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND
= 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT
= 9, /* bits 9-17 = HC1's ports */
192 TRAN_LO_DONE
= (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE
= (1 << 20), /* 6xxx: IRQ coalescing */
194 PORTS_0_3_COAL_DONE
= (1 << 8),
195 PORTS_4_7_COAL_DONE
= (1 << 17),
196 PORTS_0_7_COAL_DONE
= (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT
= (1 << 22),
198 SELF_INT
= (1 << 23),
199 TWSI_INT
= (1 << 24),
200 HC_MAIN_RSVD
= (0x7f << 25), /* bits 31-25 */
201 HC_MAIN_RSVD_5
= (0x1fff << 19), /* bits 31-19 */
202 HC_MAIN_RSVD_SOC
= (0x3fffffb << 6), /* bits 31-9, 7-6 */
203 HC_MAIN_MASKED_IRQS
= (TRAN_LO_DONE
| TRAN_HI_DONE
|
204 PORTS_0_7_COAL_DONE
| GPIO_INT
| TWSI_INT
|
206 HC_MAIN_MASKED_IRQS_5
= (PORTS_0_3_COAL_DONE
| PORTS_4_7_COAL_DONE
|
208 HC_MAIN_MASKED_IRQS_SOC
= (PORTS_0_3_COAL_DONE
| HC_MAIN_RSVD_SOC
),
210 /* SATAHC registers */
213 HC_IRQ_CAUSE_OFS
= 0x14,
214 CRPB_DMA_DONE
= (1 << 0), /* shift by port # */
215 HC_IRQ_COAL
= (1 << 4), /* IRQ coalescing */
216 DEV_IRQ
= (1 << 8), /* shift by port # */
218 /* Shadow block registers */
220 SHD_CTL_AST_OFS
= 0x20, /* ofs from SHD_BLK_OFS */
223 SATA_STATUS_OFS
= 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS
= 0x350,
225 SATA_FIS_IRQ_CAUSE_OFS
= 0x364,
232 SATA_INTERFACE_CTL
= 0x050,
234 MV_M2_PREAMP_MASK
= 0x7e0,
238 EDMA_CFG_Q_DEPTH
= 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ
= (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR
= (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT
= (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN
= (1 << 13), /* write buffer 512B */
244 EDMA_ERR_IRQ_CAUSE_OFS
= 0x8,
245 EDMA_ERR_IRQ_MASK_OFS
= 0xc,
246 EDMA_ERR_D_PAR
= (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR
= (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV
= (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON
= (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON
= (1 << 4), /* device connected */
251 EDMA_ERR_SERR
= (1 << 5), /* SError bits [WBDST] raised */
252 EDMA_ERR_SELF_DIS
= (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5
= (1 << 8), /* Gen I self-disable */
254 EDMA_ERR_BIST_ASYNC
= (1 << 8), /* BIST FIS or Async Notify */
255 EDMA_ERR_TRANS_IRQ_7
= (1 << 8), /* Gen IIE transprt layer irq */
256 EDMA_ERR_CRQB_PAR
= (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR
= (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR
= (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY
= (1 << 12), /* IORdy timeout */
261 EDMA_ERR_LNK_CTRL_RX
= (0xf << 13), /* link ctrl rx error */
262 EDMA_ERR_LNK_CTRL_RX_0
= (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1
= (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2
= (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3
= (1 << 16), /* transient: FIS rx err */
267 EDMA_ERR_LNK_DATA_RX
= (0xf << 17), /* link data rx error */
269 EDMA_ERR_LNK_CTRL_TX
= (0x1f << 21), /* link ctrl tx error */
270 EDMA_ERR_LNK_CTRL_TX_0
= (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1
= (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2
= (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3
= (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4
= (1 << 25), /* transient: FIS collision */
276 EDMA_ERR_LNK_DATA_TX
= (0x1f << 26), /* link data tx error */
278 EDMA_ERR_TRANS_PROTO
= (1 << 31), /* transport protocol error */
279 EDMA_ERR_OVERRUN_5
= (1 << 5),
280 EDMA_ERR_UNDERRUN_5
= (1 << 6),
282 EDMA_ERR_IRQ_TRANSIENT
= EDMA_ERR_LNK_CTRL_RX_0
|
283 EDMA_ERR_LNK_CTRL_RX_1
|
284 EDMA_ERR_LNK_CTRL_RX_3
|
285 EDMA_ERR_LNK_CTRL_TX
,
287 EDMA_EH_FREEZE
= EDMA_ERR_D_PAR
|
297 EDMA_ERR_LNK_CTRL_RX_2
|
298 EDMA_ERR_LNK_DATA_RX
|
299 EDMA_ERR_LNK_DATA_TX
|
300 EDMA_ERR_TRANS_PROTO
,
301 EDMA_EH_FREEZE_5
= EDMA_ERR_D_PAR
|
306 EDMA_ERR_UNDERRUN_5
|
307 EDMA_ERR_SELF_DIS_5
|
313 EDMA_REQ_Q_BASE_HI_OFS
= 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS
= 0x14, /* also contains BASE_LO */
316 EDMA_REQ_Q_OUT_PTR_OFS
= 0x18,
317 EDMA_REQ_Q_PTR_SHIFT
= 5,
319 EDMA_RSP_Q_BASE_HI_OFS
= 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS
= 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS
= 0x24, /* also contains BASE_LO */
322 EDMA_RSP_Q_PTR_SHIFT
= 3,
324 EDMA_CMD_OFS
= 0x28, /* EDMA command register */
325 EDMA_EN
= (1 << 0), /* enable EDMA */
326 EDMA_DS
= (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST
= (1 << 2), /* reset trans/link/phy */
329 EDMA_IORDY_TMOUT
= 0x34,
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI
= (1 << 0),
334 MV_HP_ERRATA_50XXB0
= (1 << 1),
335 MV_HP_ERRATA_50XXB2
= (1 << 2),
336 MV_HP_ERRATA_60X1B2
= (1 << 3),
337 MV_HP_ERRATA_60X1C0
= (1 << 4),
338 MV_HP_ERRATA_XX42A0
= (1 << 5),
339 MV_HP_GEN_I
= (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II
= (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE
= (1 << 8), /* Generation IIE: 6042/7042 */
342 MV_HP_PCIE
= (1 << 9), /* PCIe bus/regs: 7042 */
344 /* Port private flags (pp_flags) */
345 MV_PP_FLAG_EDMA_EN
= (1 << 0), /* is EDMA engine enabled? */
346 MV_PP_FLAG_NCQ_EN
= (1 << 1), /* is EDMA set up for NCQ? */
347 MV_PP_FLAG_HAD_A_RESET
= (1 << 2), /* 1st hard reset complete? */
350 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill-sg().
359 MV_DMA_BOUNDARY
= 0xffffU
,
361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
364 EDMA_REQ_Q_BASE_LO_MASK
= 0xfffffc00U
,
366 /* ditto, for response queue */
367 EDMA_RSP_Q_BASE_LO_MASK
= 0xffffff00U
,
381 /* Command ReQuest Block: 32B */
397 /* Command ResPonse Block: 8B */
404 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
412 struct mv_port_priv
{
413 struct mv_crqb
*crqb
;
415 struct mv_crpb
*crpb
;
417 struct mv_sg
*sg_tbl
[MV_MAX_Q_DEPTH
];
418 dma_addr_t sg_tbl_dma
[MV_MAX_Q_DEPTH
];
420 unsigned int req_idx
;
421 unsigned int resp_idx
;
426 struct mv_port_signal
{
431 struct mv_host_priv
{
433 struct mv_port_signal signal
[8];
434 const struct mv_hw_ops
*ops
;
437 void __iomem
*main_cause_reg_addr
;
438 void __iomem
*main_mask_reg_addr
;
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
447 struct dma_pool
*crqb_pool
;
448 struct dma_pool
*crpb_pool
;
449 struct dma_pool
*sg_tbl_pool
;
453 void (*phy_errata
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
455 void (*enable_leds
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
456 void (*read_preamp
)(struct mv_host_priv
*hpriv
, int idx
,
458 int (*reset_hc
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
460 void (*reset_flash
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
461 void (*reset_bus
)(struct ata_host
*host
, void __iomem
*mmio
);
464 static int mv_scr_read(struct ata_port
*ap
, unsigned int sc_reg_in
, u32
*val
);
465 static int mv_scr_write(struct ata_port
*ap
, unsigned int sc_reg_in
, u32 val
);
466 static int mv5_scr_read(struct ata_port
*ap
, unsigned int sc_reg_in
, u32
*val
);
467 static int mv5_scr_write(struct ata_port
*ap
, unsigned int sc_reg_in
, u32 val
);
468 static int mv_port_start(struct ata_port
*ap
);
469 static void mv_port_stop(struct ata_port
*ap
);
470 static void mv_qc_prep(struct ata_queued_cmd
*qc
);
471 static void mv_qc_prep_iie(struct ata_queued_cmd
*qc
);
472 static unsigned int mv_qc_issue(struct ata_queued_cmd
*qc
);
473 static void mv_error_handler(struct ata_port
*ap
);
474 static void mv_eh_freeze(struct ata_port
*ap
);
475 static void mv_eh_thaw(struct ata_port
*ap
);
476 static void mv6_dev_config(struct ata_device
*dev
);
478 static void mv5_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
480 static void mv5_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
481 static void mv5_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
483 static int mv5_reset_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
485 static void mv5_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
486 static void mv5_reset_bus(struct ata_host
*host
, void __iomem
*mmio
);
488 static void mv6_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
490 static void mv6_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
491 static void mv6_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
493 static int mv6_reset_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
495 static void mv6_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
496 static void mv_soc_enable_leds(struct mv_host_priv
*hpriv
,
498 static void mv_soc_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
500 static int mv_soc_reset_hc(struct mv_host_priv
*hpriv
,
501 void __iomem
*mmio
, unsigned int n_hc
);
502 static void mv_soc_reset_flash(struct mv_host_priv
*hpriv
,
504 static void mv_soc_reset_bus(struct ata_host
*host
, void __iomem
*mmio
);
505 static void mv_reset_pci_bus(struct ata_host
*host
, void __iomem
*mmio
);
506 static void mv_channel_reset(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
507 unsigned int port_no
);
508 static void mv_edma_cfg(struct mv_port_priv
*pp
, struct mv_host_priv
*hpriv
,
509 void __iomem
*port_mmio
, int want_ncq
);
510 static int __mv_stop_dma(struct ata_port
*ap
);
512 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
513 * because we have to allow room for worst case splitting of
514 * PRDs for 64K boundaries in mv_fill_sg().
/* SCSI host template for Gen-I (50xx) chips: no NCQ. */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,	/* halved: worst-case 64K-boundary splits */
	.dma_boundary		= MV_DMA_BOUNDARY,
};
/* SCSI host template for Gen-II/IIE chips: NCQ-capable. */
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,	/* one slot reserved */
	.sg_tablesize		= MV_MAX_SG_CT / 2,	/* halved: worst-case 64K-boundary splits */
	.dma_boundary		= MV_DMA_BOUNDARY,
};
/* Port operations for Gen-I (50xx) chips; base for the Gen-II variants below. */
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.error_handler		= mv_error_handler,
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
/* Gen-II (60xx): inherits Gen-I ops, adds NCQ deferral and its own SCR access. */
static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= ata_std_qc_defer,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,
};
/* Gen-IIE (6042/7042/SoC): IIE CRQB format, no Gen-II dev_config quirk. */
static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
/*
 * Per-chip port info, indexed by the chip_* board index used in mv_pci_tbl.
 * NOTE(review): the entry designator comments below were reconstructed from
 * the device-table ordering — confirm against the original source.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
/* PCI IDs this driver binds to; driver_data selects a mv_port_info entry. */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
/* Hardware-level ops for Gen-I (50xx) chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
/* Hardware-level ops for Gen-II/IIE PCI chips (60xx/7042). */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
/* Hardware-level ops for SoC-integrated controllers (no PCI interface). */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,	/* same PHY as Gen-II */
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
/* Write a register, then read it back so the posted PCI write is flushed. */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
683 static inline void __iomem
*mv_hc_base(void __iomem
*base
, unsigned int hc
)
685 return (base
+ MV_SATAHC0_REG_BASE
+ (hc
* MV_SATAHC_REG_SZ
));
688 static inline unsigned int mv_hc_from_port(unsigned int port
)
690 return port
>> MV_PORT_HC_SHIFT
;
693 static inline unsigned int mv_hardport_from_port(unsigned int port
)
695 return port
& MV_PORT_MASK
;
698 static inline void __iomem
*mv_hc_base_from_port(void __iomem
*base
,
701 return mv_hc_base(base
, mv_hc_from_port(port
));
704 static inline void __iomem
*mv_port_base(void __iomem
*base
, unsigned int port
)
706 return mv_hc_base_from_port(base
, port
) +
707 MV_SATAHC_ARBTR_REG_SZ
+
708 (mv_hardport_from_port(port
) * MV_PORT_REG_SZ
);
/* Return the controller's MMIO base stashed in host private data. */
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	/* NOTE(review): field name reconstructed — confirm hpriv->base. */
	return hpriv->base;
}
717 static inline void __iomem
*mv_ap_base(struct ata_port
*ap
)
719 return mv_port_base(mv_host_base(ap
->host
), ap
->port_no
);
722 static inline int mv_get_hc_count(unsigned long port_flags
)
724 return ((port_flags
& MV_FLAG_DUAL_HC
) ? 2 : 1);
/*
 * Program the EDMA request/response queue base addresses and in/out
 * pointers from the software-cached indices in @pp.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);		/* CRQB needs 1KB alignment */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* XX42A0 errata: OUT pointer register also needs the base address */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);		/* CRPB needs 256B alignment */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command about to run
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* NCQ <-> non-NCQ switch requires stopping EDMA first */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
				(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
/* Locked wrapper around __mv_stop_dma(); takes the host lock itself. */
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
/* Debug helper: hex-dump @bytes of MMIO space, four 32-bit words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
/* Debug helper: hex-dump @bytes of PCI config space, four dwords per line. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config, PCI regs, HC regs and per-port EDMA/SATA
 * regs.  A negative @port means "all ports"; @pdev may be NULL for SoC.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
/*
 * Map a libata SCR register index to its MMIO offset within the port
 * window; 0xffffffffU marks an unsupported register.
 */
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		/* status/error/control sit consecutively from SATA_STATUS_OFS */
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
/* Gen-II/IIE SCR read: read the register via its port-window offset. */
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;	/* unsupported SCR register */
}
*ap
, unsigned int sc_reg_in
, u32 val
)
984 unsigned int ofs
= mv_scr_offset(sc_reg_in
);
986 if (ofs
!= 0xffffffffU
) {
987 writelfl(val
, mv_ap_base(ap
) + ofs
);
/* Gen-II device config: clamp NCQ transfers that would need hob_nsect. */
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}
/*
 * Program the EDMA configuration register for this port, choosing the
 * generation-specific burst/queue options and enabling NCQ if requested.
 * Also keeps pp->pp_flags' NCQ bit in sync with the hardware setting.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/*
 * Release all per-port DMA-pool allocations (CRQB, CRPB, sg tables),
 * NULLing each pointer so this is safe to call from error paths where
 * only some allocations succeeded.
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			/* Gen-I tags > 0 alias sg_tbl[0]: free only once */
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* Gen-I: every tag aliases the single sg table */
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		/* split each segment so no ePRD crosses a 64K boundary */
		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1190 static void mv_crqb_pack_cmd(__le16
*cmdw
, u8 data
, u8 addr
, unsigned last
)
1192 u16 tmp
= data
| (addr
<< CRQB_CMD_ADDR_SHIFT
) | CRQB_CMD_CS
|
1193 (last
? CRQB_CMD_LAST
: 0);
1194 *cmdw
= cpu_to_le16(tmp
);
1198 * mv_qc_prep - Host specific command preparation.
1199 * @qc: queued command to prepare
1201 * This routine simply redirects to the general purpose routine
1202 * if command is not DMA. Else, it handles prep of the CRQB
1203 * (command request block), does some sanity checking, and calls
1204 * the SG load routine.
1207 * Inherited from caller.
1209 static void mv_qc_prep(struct ata_queued_cmd
*qc
)
1211 struct ata_port
*ap
= qc
->ap
;
1212 struct mv_port_priv
*pp
= ap
->private_data
;
1214 struct ata_taskfile
*tf
;
1218 if ((qc
->tf
.protocol
!= ATA_PROT_DMA
) &&
1219 (qc
->tf
.protocol
!= ATA_PROT_NCQ
))
1222 /* Fill in command request block
1224 if (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
))
1225 flags
|= CRQB_FLAG_READ
;
1226 WARN_ON(MV_MAX_Q_DEPTH
<= qc
->tag
);
1227 flags
|= qc
->tag
<< CRQB_TAG_SHIFT
;
1229 /* get current queue index from software */
1230 in_index
= pp
->req_idx
& MV_MAX_Q_DEPTH_MASK
;
1232 pp
->crqb
[in_index
].sg_addr
=
1233 cpu_to_le32(pp
->sg_tbl_dma
[qc
->tag
] & 0xffffffff);
1234 pp
->crqb
[in_index
].sg_addr_hi
=
1235 cpu_to_le32((pp
->sg_tbl_dma
[qc
->tag
] >> 16) >> 16);
1236 pp
->crqb
[in_index
].ctrl_flags
= cpu_to_le16(flags
);
1238 cw
= &pp
->crqb
[in_index
].ata_cmd
[0];
1241 /* Sadly, the CRQB cannot accomodate all registers--there are
1242 * only 11 bytes...so we must pick and choose required
1243 * registers based on the command. So, we drop feature and
1244 * hob_feature for [RW] DMA commands, but they are needed for
1245 * NCQ. NCQ will drop hob_nsect.
1247 switch (tf
->command
) {
1249 case ATA_CMD_READ_EXT
:
1251 case ATA_CMD_WRITE_EXT
:
1252 case ATA_CMD_WRITE_FUA_EXT
:
1253 mv_crqb_pack_cmd(cw
++, tf
->hob_nsect
, ATA_REG_NSECT
, 0);
1255 case ATA_CMD_FPDMA_READ
:
1256 case ATA_CMD_FPDMA_WRITE
:
1257 mv_crqb_pack_cmd(cw
++, tf
->hob_feature
, ATA_REG_FEATURE
, 0);
1258 mv_crqb_pack_cmd(cw
++, tf
->feature
, ATA_REG_FEATURE
, 0);
1261 /* The only other commands EDMA supports in non-queued and
1262 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1263 * of which are defined/used by Linux. If we get here, this
1264 * driver needs work.
1266 * FIXME: modify libata to give qc_prep a return value and
1267 * return error here.
1269 BUG_ON(tf
->command
);
1272 mv_crqb_pack_cmd(cw
++, tf
->nsect
, ATA_REG_NSECT
, 0);
1273 mv_crqb_pack_cmd(cw
++, tf
->hob_lbal
, ATA_REG_LBAL
, 0);
1274 mv_crqb_pack_cmd(cw
++, tf
->lbal
, ATA_REG_LBAL
, 0);
1275 mv_crqb_pack_cmd(cw
++, tf
->hob_lbam
, ATA_REG_LBAM
, 0);
1276 mv_crqb_pack_cmd(cw
++, tf
->lbam
, ATA_REG_LBAM
, 0);
1277 mv_crqb_pack_cmd(cw
++, tf
->hob_lbah
, ATA_REG_LBAH
, 0);
1278 mv_crqb_pack_cmd(cw
++, tf
->lbah
, ATA_REG_LBAH
, 0);
1279 mv_crqb_pack_cmd(cw
++, tf
->device
, ATA_REG_DEVICE
, 0);
1280 mv_crqb_pack_cmd(cw
++, tf
->command
, ATA_REG_CMD
, 1); /* last */
1282 if (!(qc
->flags
& ATA_QCFLAG_DMAMAP
))
1288 * mv_qc_prep_iie - Host specific command preparation.
1289 * @qc: queued command to prepare
1291 * This routine simply redirects to the general purpose routine
1292 * if command is not DMA. Else, it handles prep of the CRQB
1293 * (command request block), does some sanity checking, and calls
1294 * the SG load routine.
1297 * Inherited from caller.
1299 static void mv_qc_prep_iie(struct ata_queued_cmd
*qc
)
1301 struct ata_port
*ap
= qc
->ap
;
1302 struct mv_port_priv
*pp
= ap
->private_data
;
1303 struct mv_crqb_iie
*crqb
;
1304 struct ata_taskfile
*tf
;
1308 if ((qc
->tf
.protocol
!= ATA_PROT_DMA
) &&
1309 (qc
->tf
.protocol
!= ATA_PROT_NCQ
))
1312 /* Fill in Gen IIE command request block
1314 if (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
))
1315 flags
|= CRQB_FLAG_READ
;
1317 WARN_ON(MV_MAX_Q_DEPTH
<= qc
->tag
);
1318 flags
|= qc
->tag
<< CRQB_TAG_SHIFT
;
1319 flags
|= qc
->tag
<< CRQB_HOSTQ_SHIFT
;
1321 /* get current queue index from software */
1322 in_index
= pp
->req_idx
& MV_MAX_Q_DEPTH_MASK
;
1324 crqb
= (struct mv_crqb_iie
*) &pp
->crqb
[in_index
];
1325 crqb
->addr
= cpu_to_le32(pp
->sg_tbl_dma
[qc
->tag
] & 0xffffffff);
1326 crqb
->addr_hi
= cpu_to_le32((pp
->sg_tbl_dma
[qc
->tag
] >> 16) >> 16);
1327 crqb
->flags
= cpu_to_le32(flags
);
1330 crqb
->ata_cmd
[0] = cpu_to_le32(
1331 (tf
->command
<< 16) |
1334 crqb
->ata_cmd
[1] = cpu_to_le32(
1340 crqb
->ata_cmd
[2] = cpu_to_le32(
1341 (tf
->hob_lbal
<< 0) |
1342 (tf
->hob_lbam
<< 8) |
1343 (tf
->hob_lbah
<< 16) |
1344 (tf
->hob_feature
<< 24)
1346 crqb
->ata_cmd
[3] = cpu_to_le32(
1348 (tf
->hob_nsect
<< 8)
1351 if (!(qc
->flags
& ATA_QCFLAG_DMAMAP
))
1357 * mv_qc_issue - Initiate a command to the host
1358 * @qc: queued command to start
1360 * This routine simply redirects to the general purpose routine
1361 * if command is not DMA. Else, it sanity checks our local
1362 * caches of the request producer/consumer indices then enables
1363 * DMA and bumps the request producer index.
1366 * Inherited from caller.
1368 static unsigned int mv_qc_issue(struct ata_queued_cmd
*qc
)
1370 struct ata_port
*ap
= qc
->ap
;
1371 void __iomem
*port_mmio
= mv_ap_base(ap
);
1372 struct mv_port_priv
*pp
= ap
->private_data
;
1375 if ((qc
->tf
.protocol
!= ATA_PROT_DMA
) &&
1376 (qc
->tf
.protocol
!= ATA_PROT_NCQ
)) {
1377 /* We're about to send a non-EDMA capable command to the
1378 * port. Turn off EDMA so there won't be problems accessing
1379 * shadow block, etc registers.
1382 return ata_qc_issue_prot(qc
);
1385 mv_start_dma(ap
, port_mmio
, pp
, qc
->tf
.protocol
);
1389 in_index
= (pp
->req_idx
& MV_MAX_Q_DEPTH_MASK
) << EDMA_REQ_Q_PTR_SHIFT
;
1391 /* and write the request in pointer to kick the EDMA to life */
1392 writelfl((pp
->crqb_dma
& EDMA_REQ_Q_BASE_LO_MASK
) | in_index
,
1393 port_mmio
+ EDMA_REQ_Q_IN_PTR_OFS
);
1399 * mv_err_intr - Handle error interrupts on the port
1400 * @ap: ATA channel to manipulate
1401 * @reset_allowed: bool: 0 == don't trigger from reset here
1403 * In most cases, just clear the interrupt and move on. However,
1404 * some cases require an eDMA reset, which is done right before
1405 * the COMRESET in mv_phy_reset(). The SERR case requires a
1406 * clear of pending errors in the SATA SERROR register. Finally,
1407 * if the port disabled DMA, update our cached copy to match.
1410 * Inherited from caller.
1412 static void mv_err_intr(struct ata_port
*ap
, struct ata_queued_cmd
*qc
)
1414 void __iomem
*port_mmio
= mv_ap_base(ap
);
1415 u32 edma_err_cause
, eh_freeze_mask
, serr
= 0;
1416 struct mv_port_priv
*pp
= ap
->private_data
;
1417 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1418 unsigned int edma_enabled
= (pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
);
1419 unsigned int action
= 0, err_mask
= 0;
1420 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
1422 ata_ehi_clear_desc(ehi
);
1424 if (!edma_enabled
) {
1425 /* just a guess: do we need to do this? should we
1426 * expand this, and do it in all cases?
1428 sata_scr_read(&ap
->link
, SCR_ERROR
, &serr
);
1429 sata_scr_write_flush(&ap
->link
, SCR_ERROR
, serr
);
1432 edma_err_cause
= readl(port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
1434 ata_ehi_push_desc(ehi
, "edma_err 0x%08x", edma_err_cause
);
1437 * all generations share these EDMA error cause bits
1440 if (edma_err_cause
& EDMA_ERR_DEV
)
1441 err_mask
|= AC_ERR_DEV
;
1442 if (edma_err_cause
& (EDMA_ERR_D_PAR
| EDMA_ERR_PRD_PAR
|
1443 EDMA_ERR_CRQB_PAR
| EDMA_ERR_CRPB_PAR
|
1444 EDMA_ERR_INTRL_PAR
)) {
1445 err_mask
|= AC_ERR_ATA_BUS
;
1446 action
|= ATA_EH_RESET
;
1447 ata_ehi_push_desc(ehi
, "parity error");
1449 if (edma_err_cause
& (EDMA_ERR_DEV_DCON
| EDMA_ERR_DEV_CON
)) {
1450 ata_ehi_hotplugged(ehi
);
1451 ata_ehi_push_desc(ehi
, edma_err_cause
& EDMA_ERR_DEV_DCON
?
1452 "dev disconnect" : "dev connect");
1453 action
|= ATA_EH_RESET
;
1456 if (IS_GEN_I(hpriv
)) {
1457 eh_freeze_mask
= EDMA_EH_FREEZE_5
;
1459 if (edma_err_cause
& EDMA_ERR_SELF_DIS_5
) {
1460 pp
= ap
->private_data
;
1461 pp
->pp_flags
&= ~MV_PP_FLAG_EDMA_EN
;
1462 ata_ehi_push_desc(ehi
, "EDMA self-disable");
1465 eh_freeze_mask
= EDMA_EH_FREEZE
;
1467 if (edma_err_cause
& EDMA_ERR_SELF_DIS
) {
1468 pp
= ap
->private_data
;
1469 pp
->pp_flags
&= ~MV_PP_FLAG_EDMA_EN
;
1470 ata_ehi_push_desc(ehi
, "EDMA self-disable");
1473 if (edma_err_cause
& EDMA_ERR_SERR
) {
1474 sata_scr_read(&ap
->link
, SCR_ERROR
, &serr
);
1475 sata_scr_write_flush(&ap
->link
, SCR_ERROR
, serr
);
1476 err_mask
= AC_ERR_ATA_BUS
;
1477 action
|= ATA_EH_RESET
;
1481 /* Clear EDMA now that SERR cleanup done */
1482 writelfl(~edma_err_cause
, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
1485 err_mask
= AC_ERR_OTHER
;
1486 action
|= ATA_EH_RESET
;
1489 ehi
->serror
|= serr
;
1490 ehi
->action
|= action
;
1493 qc
->err_mask
|= err_mask
;
1495 ehi
->err_mask
|= err_mask
;
1497 if (edma_err_cause
& eh_freeze_mask
)
1498 ata_port_freeze(ap
);
1503 static void mv_intr_pio(struct ata_port
*ap
)
1505 struct ata_queued_cmd
*qc
;
1508 /* ignore spurious intr if drive still BUSY */
1509 ata_status
= readb(ap
->ioaddr
.status_addr
);
1510 if (unlikely(ata_status
& ATA_BUSY
))
1513 /* get active ATA command */
1514 qc
= ata_qc_from_tag(ap
, ap
->link
.active_tag
);
1515 if (unlikely(!qc
)) /* no active tag */
1517 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
) /* polling; we don't own qc */
1520 /* and finally, complete the ATA command */
1521 qc
->err_mask
|= ac_err_mask(ata_status
);
1522 ata_qc_complete(qc
);
1525 static void mv_intr_edma(struct ata_port
*ap
)
1527 void __iomem
*port_mmio
= mv_ap_base(ap
);
1528 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1529 struct mv_port_priv
*pp
= ap
->private_data
;
1530 struct ata_queued_cmd
*qc
;
1531 u32 out_index
, in_index
;
1532 bool work_done
= false;
1534 /* get h/w response queue pointer */
1535 in_index
= (readl(port_mmio
+ EDMA_RSP_Q_IN_PTR_OFS
)
1536 >> EDMA_RSP_Q_PTR_SHIFT
) & MV_MAX_Q_DEPTH_MASK
;
1542 /* get s/w response queue last-read pointer, and compare */
1543 out_index
= pp
->resp_idx
& MV_MAX_Q_DEPTH_MASK
;
1544 if (in_index
== out_index
)
1547 /* 50xx: get active ATA command */
1548 if (IS_GEN_I(hpriv
))
1549 tag
= ap
->link
.active_tag
;
1551 /* Gen II/IIE: get active ATA command via tag, to enable
1552 * support for queueing. this works transparently for
1553 * queued and non-queued modes.
1556 tag
= le16_to_cpu(pp
->crpb
[out_index
].id
) & 0x1f;
1558 qc
= ata_qc_from_tag(ap
, tag
);
1560 /* For non-NCQ mode, the lower 8 bits of status
1561 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1562 * which should be zero if all went well.
1564 status
= le16_to_cpu(pp
->crpb
[out_index
].flags
);
1565 if ((status
& 0xff) && !(pp
->pp_flags
& MV_PP_FLAG_NCQ_EN
)) {
1566 mv_err_intr(ap
, qc
);
1570 /* and finally, complete the ATA command */
1573 ac_err_mask(status
>> CRPB_FLAG_STATUS_SHIFT
);
1574 ata_qc_complete(qc
);
1577 /* advance software response queue pointer, to
1578 * indicate (after the loop completes) to hardware
1579 * that we have consumed a response queue entry.
1586 writelfl((pp
->crpb_dma
& EDMA_RSP_Q_BASE_LO_MASK
) |
1587 (out_index
<< EDMA_RSP_Q_PTR_SHIFT
),
1588 port_mmio
+ EDMA_RSP_Q_OUT_PTR_OFS
);
1592 * mv_host_intr - Handle all interrupts on the given host controller
1593 * @host: host specific structure
1594 * @relevant: port error bits relevant to this host controller
1595 * @hc: which host controller we're to look at
1597 * Read then write clear the HC interrupt status then walk each
1598 * port connected to the HC and see if it needs servicing. Port
1599 * success ints are reported in the HC interrupt status reg, the
1600 * port error ints are reported in the higher level main
1601 * interrupt status register and thus are passed in via the
1602 * 'relevant' argument.
1605 * Inherited from caller.
1607 static void mv_host_intr(struct ata_host
*host
, u32 relevant
, unsigned int hc
)
1609 struct mv_host_priv
*hpriv
= host
->private_data
;
1610 void __iomem
*mmio
= hpriv
->base
;
1611 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
1613 int port
, port0
, last_port
;
1618 port0
= MV_PORTS_PER_HC
;
1621 last_port
= port0
+ MV_PORTS_PER_HC
;
1623 last_port
= port0
+ hpriv
->n_ports
;
1624 /* we'll need the HC success int register in most cases */
1625 hc_irq_cause
= readl(hc_mmio
+ HC_IRQ_CAUSE_OFS
);
1629 writelfl(~hc_irq_cause
, hc_mmio
+ HC_IRQ_CAUSE_OFS
);
1631 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1632 hc
, relevant
, hc_irq_cause
);
1634 for (port
= port0
; port
< last_port
; port
++) {
1635 struct ata_port
*ap
= host
->ports
[port
];
1636 struct mv_port_priv
*pp
;
1637 int have_err_bits
, hard_port
, shift
;
1639 if ((!ap
) || (ap
->flags
& ATA_FLAG_DISABLED
))
1642 pp
= ap
->private_data
;
1644 shift
= port
<< 1; /* (port * 2) */
1645 if (port
>= MV_PORTS_PER_HC
) {
1646 shift
++; /* skip bit 8 in the HC Main IRQ reg */
1648 have_err_bits
= ((PORT0_ERR
<< shift
) & relevant
);
1650 if (unlikely(have_err_bits
)) {
1651 struct ata_queued_cmd
*qc
;
1653 qc
= ata_qc_from_tag(ap
, ap
->link
.active_tag
);
1654 if (qc
&& (qc
->tf
.flags
& ATA_TFLAG_POLLING
))
1657 mv_err_intr(ap
, qc
);
1661 hard_port
= mv_hardport_from_port(port
); /* range 0..3 */
1663 if (pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
) {
1664 if ((CRPB_DMA_DONE
<< hard_port
) & hc_irq_cause
)
1667 if ((DEV_IRQ
<< hard_port
) & hc_irq_cause
)
1674 static void mv_pci_error(struct ata_host
*host
, void __iomem
*mmio
)
1676 struct mv_host_priv
*hpriv
= host
->private_data
;
1677 struct ata_port
*ap
;
1678 struct ata_queued_cmd
*qc
;
1679 struct ata_eh_info
*ehi
;
1680 unsigned int i
, err_mask
, printed
= 0;
1683 err_cause
= readl(mmio
+ hpriv
->irq_cause_ofs
);
1685 dev_printk(KERN_ERR
, host
->dev
, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1688 DPRINTK("All regs @ PCI error\n");
1689 mv_dump_all_regs(mmio
, -1, to_pci_dev(host
->dev
));
1691 writelfl(0, mmio
+ hpriv
->irq_cause_ofs
);
1693 for (i
= 0; i
< host
->n_ports
; i
++) {
1694 ap
= host
->ports
[i
];
1695 if (!ata_link_offline(&ap
->link
)) {
1696 ehi
= &ap
->link
.eh_info
;
1697 ata_ehi_clear_desc(ehi
);
1699 ata_ehi_push_desc(ehi
,
1700 "PCI err cause 0x%08x", err_cause
);
1701 err_mask
= AC_ERR_HOST_BUS
;
1702 ehi
->action
= ATA_EH_RESET
;
1703 qc
= ata_qc_from_tag(ap
, ap
->link
.active_tag
);
1705 qc
->err_mask
|= err_mask
;
1707 ehi
->err_mask
|= err_mask
;
1709 ata_port_freeze(ap
);
1715 * mv_interrupt - Main interrupt event handler
1717 * @dev_instance: private data; in this case the host structure
1719 * Read the read only register to determine if any host
1720 * controllers have pending interrupts. If so, call lower level
1721 * routine to handle. Also check for PCI errors which are only
1725 * This routine holds the host lock while processing pending
1728 static irqreturn_t
mv_interrupt(int irq
, void *dev_instance
)
1730 struct ata_host
*host
= dev_instance
;
1731 struct mv_host_priv
*hpriv
= host
->private_data
;
1732 unsigned int hc
, handled
= 0, n_hcs
;
1733 void __iomem
*mmio
= hpriv
->base
;
1734 u32 irq_stat
, irq_mask
;
1736 spin_lock(&host
->lock
);
1738 irq_stat
= readl(hpriv
->main_cause_reg_addr
);
1739 irq_mask
= readl(hpriv
->main_mask_reg_addr
);
1741 /* check the cases where we either have nothing pending or have read
1742 * a bogus register value which can indicate HW removal or PCI fault
1744 if (!(irq_stat
& irq_mask
) || (0xffffffffU
== irq_stat
))
1747 n_hcs
= mv_get_hc_count(host
->ports
[0]->flags
);
1749 if (unlikely((irq_stat
& PCI_ERR
) && HAS_PCI(host
))) {
1750 mv_pci_error(host
, mmio
);
1752 goto out_unlock
; /* skip all other HC irq handling */
1755 for (hc
= 0; hc
< n_hcs
; hc
++) {
1756 u32 relevant
= irq_stat
& (HC0_IRQ_PEND
<< (hc
* HC_SHIFT
));
1758 mv_host_intr(host
, relevant
, hc
);
1764 spin_unlock(&host
->lock
);
1766 return IRQ_RETVAL(handled
);
1769 static void __iomem
*mv5_phy_base(void __iomem
*mmio
, unsigned int port
)
1771 void __iomem
*hc_mmio
= mv_hc_base_from_port(mmio
, port
);
1772 unsigned long ofs
= (mv_hardport_from_port(port
) + 1) * 0x100UL
;
1774 return hc_mmio
+ ofs
;
1777 static unsigned int mv5_scr_offset(unsigned int sc_reg_in
)
1781 switch (sc_reg_in
) {
1785 ofs
= sc_reg_in
* sizeof(u32
);
1794 static int mv5_scr_read(struct ata_port
*ap
, unsigned int sc_reg_in
, u32
*val
)
1796 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1797 void __iomem
*mmio
= hpriv
->base
;
1798 void __iomem
*addr
= mv5_phy_base(mmio
, ap
->port_no
);
1799 unsigned int ofs
= mv5_scr_offset(sc_reg_in
);
1801 if (ofs
!= 0xffffffffU
) {
1802 *val
= readl(addr
+ ofs
);
1808 static int mv5_scr_write(struct ata_port
*ap
, unsigned int sc_reg_in
, u32 val
)
1810 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1811 void __iomem
*mmio
= hpriv
->base
;
1812 void __iomem
*addr
= mv5_phy_base(mmio
, ap
->port_no
);
1813 unsigned int ofs
= mv5_scr_offset(sc_reg_in
);
1815 if (ofs
!= 0xffffffffU
) {
1816 writelfl(val
, addr
+ ofs
);
1822 static void mv5_reset_bus(struct ata_host
*host
, void __iomem
*mmio
)
1824 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
1827 early_5080
= (pdev
->device
== 0x5080) && (pdev
->revision
== 0);
1830 u32 tmp
= readl(mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1832 writel(tmp
, mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1835 mv_reset_pci_bus(host
, mmio
);
1838 static void mv5_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
1840 writel(0x0fcfffff, mmio
+ MV_FLASH_CTL
);
1843 static void mv5_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
1846 void __iomem
*phy_mmio
= mv5_phy_base(mmio
, idx
);
1849 tmp
= readl(phy_mmio
+ MV5_PHY_MODE
);
1851 hpriv
->signal
[idx
].pre
= tmp
& 0x1800; /* bits 12:11 */
1852 hpriv
->signal
[idx
].amps
= tmp
& 0xe0; /* bits 7:5 */
1855 static void mv5_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
1859 writel(0, mmio
+ MV_GPIO_PORT_CTL
);
1861 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1863 tmp
= readl(mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1865 writel(tmp
, mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1868 static void mv5_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1871 void __iomem
*phy_mmio
= mv5_phy_base(mmio
, port
);
1872 const u32 mask
= (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1874 int fix_apm_sq
= (hpriv
->hp_flags
& MV_HP_ERRATA_50XXB0
);
1877 tmp
= readl(phy_mmio
+ MV5_LT_MODE
);
1879 writel(tmp
, phy_mmio
+ MV5_LT_MODE
);
1881 tmp
= readl(phy_mmio
+ MV5_PHY_CTL
);
1884 writel(tmp
, phy_mmio
+ MV5_PHY_CTL
);
1887 tmp
= readl(phy_mmio
+ MV5_PHY_MODE
);
1889 tmp
|= hpriv
->signal
[port
].pre
;
1890 tmp
|= hpriv
->signal
[port
].amps
;
1891 writel(tmp
, phy_mmio
+ MV5_PHY_MODE
);
1896 #define ZERO(reg) writel(0, port_mmio + (reg))
1897 static void mv5_reset_hc_port(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1900 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
1902 writelfl(EDMA_DS
, port_mmio
+ EDMA_CMD_OFS
);
1904 mv_channel_reset(hpriv
, mmio
, port
);
1906 ZERO(0x028); /* command */
1907 writel(0x11f, port_mmio
+ EDMA_CFG_OFS
);
1908 ZERO(0x004); /* timer */
1909 ZERO(0x008); /* irq err cause */
1910 ZERO(0x00c); /* irq err mask */
1911 ZERO(0x010); /* rq bah */
1912 ZERO(0x014); /* rq inp */
1913 ZERO(0x018); /* rq outp */
1914 ZERO(0x01c); /* respq bah */
1915 ZERO(0x024); /* respq outp */
1916 ZERO(0x020); /* respq inp */
1917 ZERO(0x02c); /* test control */
1918 writel(0xbc, port_mmio
+ EDMA_IORDY_TMOUT
);
1922 #define ZERO(reg) writel(0, hc_mmio + (reg))
1923 static void mv5_reset_one_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1926 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
1934 tmp
= readl(hc_mmio
+ 0x20);
1937 writel(tmp
, hc_mmio
+ 0x20);
1941 static int mv5_reset_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1944 unsigned int hc
, port
;
1946 for (hc
= 0; hc
< n_hc
; hc
++) {
1947 for (port
= 0; port
< MV_PORTS_PER_HC
; port
++)
1948 mv5_reset_hc_port(hpriv
, mmio
,
1949 (hc
* MV_PORTS_PER_HC
) + port
);
1951 mv5_reset_one_hc(hpriv
, mmio
, hc
);
1958 #define ZERO(reg) writel(0, mmio + (reg))
1959 static void mv_reset_pci_bus(struct ata_host
*host
, void __iomem
*mmio
)
1961 struct mv_host_priv
*hpriv
= host
->private_data
;
1964 tmp
= readl(mmio
+ MV_PCI_MODE
);
1966 writel(tmp
, mmio
+ MV_PCI_MODE
);
1968 ZERO(MV_PCI_DISC_TIMER
);
1969 ZERO(MV_PCI_MSI_TRIGGER
);
1970 writel(0x000100ff, mmio
+ MV_PCI_XBAR_TMOUT
);
1971 ZERO(HC_MAIN_IRQ_MASK_OFS
);
1972 ZERO(MV_PCI_SERR_MASK
);
1973 ZERO(hpriv
->irq_cause_ofs
);
1974 ZERO(hpriv
->irq_mask_ofs
);
1975 ZERO(MV_PCI_ERR_LOW_ADDRESS
);
1976 ZERO(MV_PCI_ERR_HIGH_ADDRESS
);
1977 ZERO(MV_PCI_ERR_ATTRIBUTE
);
1978 ZERO(MV_PCI_ERR_COMMAND
);
1982 static void mv6_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
1986 mv5_reset_flash(hpriv
, mmio
);
1988 tmp
= readl(mmio
+ MV_GPIO_PORT_CTL
);
1990 tmp
|= (1 << 5) | (1 << 6);
1991 writel(tmp
, mmio
+ MV_GPIO_PORT_CTL
);
1995 * mv6_reset_hc - Perform the 6xxx global soft reset
1996 * @mmio: base address of the HBA
1998 * This routine only applies to 6xxx parts.
2001 * Inherited from caller.
2003 static int mv6_reset_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
2006 void __iomem
*reg
= mmio
+ PCI_MAIN_CMD_STS_OFS
;
2010 /* Following procedure defined in PCI "main command and status
2014 writel(t
| STOP_PCI_MASTER
, reg
);
2016 for (i
= 0; i
< 1000; i
++) {
2019 if (PCI_MASTER_EMPTY
& t
)
2022 if (!(PCI_MASTER_EMPTY
& t
)) {
2023 printk(KERN_ERR DRV_NAME
": PCI master won't flush\n");
2031 writel(t
| GLOB_SFT_RST
, reg
);
2034 } while (!(GLOB_SFT_RST
& t
) && (i
-- > 0));
2036 if (!(GLOB_SFT_RST
& t
)) {
2037 printk(KERN_ERR DRV_NAME
": can't set global reset\n");
2042 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2045 writel(t
& ~(GLOB_SFT_RST
| STOP_PCI_MASTER
), reg
);
2048 } while ((GLOB_SFT_RST
& t
) && (i
-- > 0));
2050 if (GLOB_SFT_RST
& t
) {
2051 printk(KERN_ERR DRV_NAME
": can't clear global reset\n");
2058 static void mv6_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
2061 void __iomem
*port_mmio
;
2064 tmp
= readl(mmio
+ MV_RESET_CFG
);
2065 if ((tmp
& (1 << 0)) == 0) {
2066 hpriv
->signal
[idx
].amps
= 0x7 << 8;
2067 hpriv
->signal
[idx
].pre
= 0x1 << 5;
2071 port_mmio
= mv_port_base(mmio
, idx
);
2072 tmp
= readl(port_mmio
+ PHY_MODE2
);
2074 hpriv
->signal
[idx
].amps
= tmp
& 0x700; /* bits 10:8 */
2075 hpriv
->signal
[idx
].pre
= tmp
& 0xe0; /* bits 7:5 */
2078 static void mv6_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
2080 writel(0x00000060, mmio
+ MV_GPIO_PORT_CTL
);
2083 static void mv6_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
2086 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
2088 u32 hp_flags
= hpriv
->hp_flags
;
2090 hp_flags
& (MV_HP_ERRATA_60X1B2
| MV_HP_ERRATA_60X1C0
);
2092 hp_flags
& (MV_HP_ERRATA_60X1B2
| MV_HP_ERRATA_60X1C0
);
2095 if (fix_phy_mode2
) {
2096 m2
= readl(port_mmio
+ PHY_MODE2
);
2099 writel(m2
, port_mmio
+ PHY_MODE2
);
2103 m2
= readl(port_mmio
+ PHY_MODE2
);
2104 m2
&= ~((1 << 16) | (1 << 31));
2105 writel(m2
, port_mmio
+ PHY_MODE2
);
2110 /* who knows what this magic does */
2111 tmp
= readl(port_mmio
+ PHY_MODE3
);
2114 writel(tmp
, port_mmio
+ PHY_MODE3
);
2116 if (fix_phy_mode4
) {
2119 m4
= readl(port_mmio
+ PHY_MODE4
);
2121 if (hp_flags
& MV_HP_ERRATA_60X1B2
)
2122 tmp
= readl(port_mmio
+ 0x310);
2124 m4
= (m4
& ~(1 << 1)) | (1 << 0);
2126 writel(m4
, port_mmio
+ PHY_MODE4
);
2128 if (hp_flags
& MV_HP_ERRATA_60X1B2
)
2129 writel(tmp
, port_mmio
+ 0x310);
2132 /* Revert values of pre-emphasis and signal amps to the saved ones */
2133 m2
= readl(port_mmio
+ PHY_MODE2
);
2135 m2
&= ~MV_M2_PREAMP_MASK
;
2136 m2
|= hpriv
->signal
[port
].amps
;
2137 m2
|= hpriv
->signal
[port
].pre
;
2140 /* according to mvSata 3.6.1, some IIE values are fixed */
2141 if (IS_GEN_IIE(hpriv
)) {
2146 writel(m2
, port_mmio
+ PHY_MODE2
);
2149 /* TODO: use the generic LED interface to configure the SATA Presence */
2150 /* & Acitivy LEDs on the board */
2151 static void mv_soc_enable_leds(struct mv_host_priv
*hpriv
,
2157 static void mv_soc_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
2160 void __iomem
*port_mmio
;
2163 port_mmio
= mv_port_base(mmio
, idx
);
2164 tmp
= readl(port_mmio
+ PHY_MODE2
);
2166 hpriv
->signal
[idx
].amps
= tmp
& 0x700; /* bits 10:8 */
2167 hpriv
->signal
[idx
].pre
= tmp
& 0xe0; /* bits 7:5 */
2171 #define ZERO(reg) writel(0, port_mmio + (reg))
2172 static void mv_soc_reset_hc_port(struct mv_host_priv
*hpriv
,
2173 void __iomem
*mmio
, unsigned int port
)
2175 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
2177 writelfl(EDMA_DS
, port_mmio
+ EDMA_CMD_OFS
);
2179 mv_channel_reset(hpriv
, mmio
, port
);
2181 ZERO(0x028); /* command */
2182 writel(0x101f, port_mmio
+ EDMA_CFG_OFS
);
2183 ZERO(0x004); /* timer */
2184 ZERO(0x008); /* irq err cause */
2185 ZERO(0x00c); /* irq err mask */
2186 ZERO(0x010); /* rq bah */
2187 ZERO(0x014); /* rq inp */
2188 ZERO(0x018); /* rq outp */
2189 ZERO(0x01c); /* respq bah */
2190 ZERO(0x024); /* respq outp */
2191 ZERO(0x020); /* respq inp */
2192 ZERO(0x02c); /* test control */
2193 writel(0xbc, port_mmio
+ EDMA_IORDY_TMOUT
);
2198 #define ZERO(reg) writel(0, hc_mmio + (reg))
2199 static void mv_soc_reset_one_hc(struct mv_host_priv
*hpriv
,
2202 void __iomem
*hc_mmio
= mv_hc_base(mmio
, 0);
2212 static int mv_soc_reset_hc(struct mv_host_priv
*hpriv
,
2213 void __iomem
*mmio
, unsigned int n_hc
)
2217 for (port
= 0; port
< hpriv
->n_ports
; port
++)
2218 mv_soc_reset_hc_port(hpriv
, mmio
, port
);
2220 mv_soc_reset_one_hc(hpriv
, mmio
);
2225 static void mv_soc_reset_flash(struct mv_host_priv
*hpriv
,
2231 static void mv_soc_reset_bus(struct ata_host
*host
, void __iomem
*mmio
)
2236 static void mv_channel_reset(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
2237 unsigned int port_no
)
2239 void __iomem
*port_mmio
= mv_port_base(mmio
, port_no
);
2241 writelfl(ATA_RST
, port_mmio
+ EDMA_CMD_OFS
);
2243 if (IS_GEN_II(hpriv
)) {
2244 u32 ifctl
= readl(port_mmio
+ SATA_INTERFACE_CTL
);
2245 ifctl
|= (1 << 7); /* enable gen2i speed */
2246 ifctl
= (ifctl
& 0xfff) | 0x9b1000; /* from chip spec */
2247 writelfl(ifctl
, port_mmio
+ SATA_INTERFACE_CTL
);
2250 udelay(25); /* allow reset propagation */
2252 /* Spec never mentions clearing the bit. Marvell's driver does
2253 * clear the bit, however.
2255 writelfl(0, port_mmio
+ EDMA_CMD_OFS
);
2257 hpriv
->ops
->phy_errata(hpriv
, mmio
, port_no
);
2259 if (IS_GEN_I(hpriv
))
2264 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2265 * @ap: ATA channel to manipulate
2267 * Part of this is taken from __sata_phy_reset and modified to
2268 * not sleep since this routine gets called from interrupt level.
2271 * Inherited from caller. This is coded to safe to call at
2272 * interrupt level, i.e. it does not sleep.
2274 static void mv_phy_reset(struct ata_port
*ap
, unsigned int *class,
2275 unsigned long deadline
)
2277 struct mv_port_priv
*pp
= ap
->private_data
;
2278 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
2279 void __iomem
*port_mmio
= mv_ap_base(ap
);
2283 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap
->port_no
, port_mmio
);
2287 u32 sstatus
, serror
, scontrol
;
2289 mv_scr_read(ap
, SCR_STATUS
, &sstatus
);
2290 mv_scr_read(ap
, SCR_ERROR
, &serror
);
2291 mv_scr_read(ap
, SCR_CONTROL
, &scontrol
);
2292 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2293 "SCtrl 0x%08x\n", sstatus
, serror
, scontrol
);
2297 /* Issue COMRESET via SControl */
2299 sata_scr_write_flush(&ap
->link
, SCR_CONTROL
, 0x301);
2302 sata_scr_write_flush(&ap
->link
, SCR_CONTROL
, 0x300);
2306 sata_scr_read(&ap
->link
, SCR_STATUS
, &sstatus
);
2307 if (((sstatus
& 0x3) == 3) || ((sstatus
& 0x3) == 0))
2311 } while (time_before(jiffies
, deadline
));
2313 /* work around errata */
2314 if (IS_GEN_II(hpriv
) &&
2315 (sstatus
!= 0x0) && (sstatus
!= 0x113) && (sstatus
!= 0x123) &&
2317 goto comreset_retry
;
2321 u32 sstatus
, serror
, scontrol
;
2323 mv_scr_read(ap
, SCR_STATUS
, &sstatus
);
2324 mv_scr_read(ap
, SCR_ERROR
, &serror
);
2325 mv_scr_read(ap
, SCR_CONTROL
, &scontrol
);
2326 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2327 "SCtrl 0x%08x\n", sstatus
, serror
, scontrol
);
2331 if (ata_link_offline(&ap
->link
)) {
2332 *class = ATA_DEV_NONE
;
2336 /* even after SStatus reflects that device is ready,
2337 * it seems to take a while for link to be fully
2338 * established (and thus Status no longer 0x80/0x7F),
2339 * so we poll a bit for that, here.
2343 u8 drv_stat
= ata_check_status(ap
);
2344 if ((drv_stat
!= 0x80) && (drv_stat
!= 0x7f))
2349 if (time_after(jiffies
, deadline
))
2353 /* FIXME: if we passed the deadline, the following
2354 * code probably produces an invalid result
2357 /* finally, read device signature from TF registers */
2358 *class = ata_dev_try_classify(ap
->link
.device
, 1, NULL
);
2360 writelfl(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
2362 WARN_ON(pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
);
2367 static int mv_prereset(struct ata_link
*link
, unsigned long deadline
)
2369 struct ata_port
*ap
= link
->ap
;
2370 struct mv_port_priv
*pp
= ap
->private_data
;
2374 if (!(pp
->pp_flags
& MV_PP_FLAG_HAD_A_RESET
))
2375 pp
->pp_flags
|= MV_PP_FLAG_HAD_A_RESET
;
2380 static int mv_hardreset(struct ata_link
*link
, unsigned int *class,
2381 unsigned long deadline
)
2383 struct ata_port
*ap
= link
->ap
;
2384 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
2385 void __iomem
*mmio
= hpriv
->base
;
2389 mv_channel_reset(hpriv
, mmio
, ap
->port_no
);
2391 mv_phy_reset(ap
, class, deadline
);
2396 static void mv_postreset(struct ata_link
*link
, unsigned int *classes
)
2398 struct ata_port
*ap
= link
->ap
;
2401 /* print link status */
2402 sata_print_link_status(link
);
2405 sata_scr_read(link
, SCR_ERROR
, &serr
);
2406 sata_scr_write_flush(link
, SCR_ERROR
, serr
);
2408 /* bail out if no device is present */
2409 if (classes
[0] == ATA_DEV_NONE
&& classes
[1] == ATA_DEV_NONE
) {
2410 DPRINTK("EXIT, no device\n");
2414 /* set up device control */
2415 iowrite8(ap
->ctl
, ap
->ioaddr
.ctl_addr
);
2418 static void mv_error_handler(struct ata_port
*ap
)
2420 ata_do_eh(ap
, mv_prereset
, ata_std_softreset
,
2421 mv_hardreset
, mv_postreset
);
2424 static void mv_eh_freeze(struct ata_port
*ap
)
2426 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
2427 unsigned int hc
= (ap
->port_no
> 3) ? 1 : 0;
2431 /* FIXME: handle coalescing completion events properly */
2433 shift
= ap
->port_no
* 2;
2437 mask
= 0x3 << shift
;
2439 /* disable assertion of portN err, done events */
2440 tmp
= readl(hpriv
->main_mask_reg_addr
);
2441 writelfl(tmp
& ~mask
, hpriv
->main_mask_reg_addr
);
2444 static void mv_eh_thaw(struct ata_port
*ap
)
2446 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
2447 void __iomem
*mmio
= hpriv
->base
;
2448 unsigned int hc
= (ap
->port_no
> 3) ? 1 : 0;
2449 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
2450 void __iomem
*port_mmio
= mv_ap_base(ap
);
2451 u32 tmp
, mask
, hc_irq_cause
;
2452 unsigned int shift
, hc_port_no
= ap
->port_no
;
2454 /* FIXME: handle coalescing completion events properly */
2456 shift
= ap
->port_no
* 2;
2462 mask
= 0x3 << shift
;
2464 /* clear EDMA errors on this port */
2465 writel(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
2467 /* clear pending irq events */
2468 hc_irq_cause
= readl(hc_mmio
+ HC_IRQ_CAUSE_OFS
);
2469 hc_irq_cause
&= ~(1 << hc_port_no
); /* clear CRPB-done */
2470 hc_irq_cause
&= ~(1 << (hc_port_no
+ 8)); /* clear Device int */
2471 writel(hc_irq_cause
, hc_mmio
+ HC_IRQ_CAUSE_OFS
);
2473 /* enable assertion of portN err, done events */
2474 tmp
= readl(hpriv
->main_mask_reg_addr
);
2475 writelfl(tmp
| mask
, hpriv
->main_mask_reg_addr
);
2479 * mv_port_init - Perform some early initialization on a single port.
2480 * @port: libata data structure storing shadow register addresses
2481 * @port_mmio: base address of the port
2483 * Initialize shadow register mmio addresses, clear outstanding
2484 * interrupts on the port, and unmask interrupts for the future
2485 * start of the port.
2488 * Inherited from caller.
2490 static void mv_port_init(struct ata_ioports
*port
, void __iomem
*port_mmio
)
2492 void __iomem
*shd_base
= port_mmio
+ SHD_BLK_OFS
;
2495 /* PIO related setup
2497 port
->data_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_DATA
);
2499 port
->feature_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_ERR
);
2500 port
->nsect_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_NSECT
);
2501 port
->lbal_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_LBAL
);
2502 port
->lbam_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_LBAM
);
2503 port
->lbah_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_LBAH
);
2504 port
->device_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_DEVICE
);
2506 port
->command_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_STATUS
);
2507 /* special case: control/altstatus doesn't have ATA_REG_ address */
2508 port
->altstatus_addr
= port
->ctl_addr
= shd_base
+ SHD_CTL_AST_OFS
;
2511 port
->cmd_addr
= port
->bmdma_addr
= port
->scr_addr
= NULL
;
2513 /* Clear any currently outstanding port interrupt conditions */
2514 serr_ofs
= mv_scr_offset(SCR_ERROR
);
2515 writelfl(readl(port_mmio
+ serr_ofs
), port_mmio
+ serr_ofs
);
2516 writelfl(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
2518 /* unmask all non-transient EDMA error interrupts */
2519 writelfl(~EDMA_ERR_IRQ_TRANSIENT
, port_mmio
+ EDMA_ERR_IRQ_MASK_OFS
);
2521 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2522 readl(port_mmio
+ EDMA_CFG_OFS
),
2523 readl(port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
),
2524 readl(port_mmio
+ EDMA_ERR_IRQ_MASK_OFS
));
2527 static int mv_chip_id(struct ata_host
*host
, unsigned int board_idx
)
2529 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
2530 struct mv_host_priv
*hpriv
= host
->private_data
;
2531 u32 hp_flags
= hpriv
->hp_flags
;
2533 switch (board_idx
) {
2535 hpriv
->ops
= &mv5xxx_ops
;
2536 hp_flags
|= MV_HP_GEN_I
;
2538 switch (pdev
->revision
) {
2540 hp_flags
|= MV_HP_ERRATA_50XXB0
;
2543 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2546 dev_printk(KERN_WARNING
, &pdev
->dev
,
2547 "Applying 50XXB2 workarounds to unknown rev\n");
2548 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2555 hpriv
->ops
= &mv5xxx_ops
;
2556 hp_flags
|= MV_HP_GEN_I
;
2558 switch (pdev
->revision
) {
2560 hp_flags
|= MV_HP_ERRATA_50XXB0
;
2563 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2566 dev_printk(KERN_WARNING
, &pdev
->dev
,
2567 "Applying B2 workarounds to unknown rev\n");
2568 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2575 hpriv
->ops
= &mv6xxx_ops
;
2576 hp_flags
|= MV_HP_GEN_II
;
2578 switch (pdev
->revision
) {
2580 hp_flags
|= MV_HP_ERRATA_60X1B2
;
2583 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2586 dev_printk(KERN_WARNING
, &pdev
->dev
,
2587 "Applying B2 workarounds to unknown rev\n");
2588 hp_flags
|= MV_HP_ERRATA_60X1B2
;
2594 hp_flags
|= MV_HP_PCIE
;
2595 if (pdev
->vendor
== PCI_VENDOR_ID_TTI
&&
2596 (pdev
->device
== 0x2300 || pdev
->device
== 0x2310))
2599 * Highpoint RocketRAID PCIe 23xx series cards:
2601 * Unconfigured drives are treated as "Legacy"
2602 * by the BIOS, and it overwrites sector 8 with
2603 * a "Lgcy" metadata block prior to Linux boot.
2605 * Configured drives (RAID or JBOD) leave sector 8
2606 * alone, but instead overwrite a high numbered
2607 * sector for the RAID metadata. This sector can
2608 * be determined exactly, by truncating the physical
2609 * drive capacity to a nice even GB value.
2611 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2613 * Warn the user, lest they think we're just buggy.
2615 printk(KERN_WARNING DRV_NAME
": Highpoint RocketRAID"
2616 " BIOS CORRUPTS DATA on all attached drives,"
2617 " regardless of if/how they are configured."
2619 printk(KERN_WARNING DRV_NAME
": For data safety, do not"
2620 " use sectors 8-9 on \"Legacy\" drives,"
2621 " and avoid the final two gigabytes on"
2622 " all RocketRAID BIOS initialized drives.\n");
2625 hpriv
->ops
= &mv6xxx_ops
;
2626 hp_flags
|= MV_HP_GEN_IIE
;
2628 switch (pdev
->revision
) {
2630 hp_flags
|= MV_HP_ERRATA_XX42A0
;
2633 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2636 dev_printk(KERN_WARNING
, &pdev
->dev
,
2637 "Applying 60X1C0 workarounds to unknown rev\n");
2638 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2643 hpriv
->ops
= &mv_soc_ops
;
2644 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2648 dev_printk(KERN_ERR
, host
->dev
,
2649 "BUG: invalid board index %u\n", board_idx
);
2653 hpriv
->hp_flags
= hp_flags
;
2654 if (hp_flags
& MV_HP_PCIE
) {
2655 hpriv
->irq_cause_ofs
= PCIE_IRQ_CAUSE_OFS
;
2656 hpriv
->irq_mask_ofs
= PCIE_IRQ_MASK_OFS
;
2657 hpriv
->unmask_all_irqs
= PCIE_UNMASK_ALL_IRQS
;
2659 hpriv
->irq_cause_ofs
= PCI_IRQ_CAUSE_OFS
;
2660 hpriv
->irq_mask_ofs
= PCI_IRQ_MASK_OFS
;
2661 hpriv
->unmask_all_irqs
= PCI_UNMASK_ALL_IRQS
;
2668 * mv_init_host - Perform some early initialization of the host.
2669 * @host: ATA host to initialize
2670 * @board_idx: controller index
2672 * If possible, do an early global reset of the host. Then do
2673 * our port init and clear/unmask all/relevant host interrupts.
2676 * Inherited from caller.
2678 static int mv_init_host(struct ata_host
*host
, unsigned int board_idx
)
2680 int rc
= 0, n_hc
, port
, hc
;
2681 struct mv_host_priv
*hpriv
= host
->private_data
;
2682 void __iomem
*mmio
= hpriv
->base
;
2684 rc
= mv_chip_id(host
, board_idx
);
2688 if (HAS_PCI(host
)) {
2689 hpriv
->main_cause_reg_addr
= hpriv
->base
+
2690 HC_MAIN_IRQ_CAUSE_OFS
;
2691 hpriv
->main_mask_reg_addr
= hpriv
->base
+ HC_MAIN_IRQ_MASK_OFS
;
2693 hpriv
->main_cause_reg_addr
= hpriv
->base
+
2694 HC_SOC_MAIN_IRQ_CAUSE_OFS
;
2695 hpriv
->main_mask_reg_addr
= hpriv
->base
+
2696 HC_SOC_MAIN_IRQ_MASK_OFS
;
2698 /* global interrupt mask */
2699 writel(0, hpriv
->main_mask_reg_addr
);
2701 n_hc
= mv_get_hc_count(host
->ports
[0]->flags
);
2703 for (port
= 0; port
< host
->n_ports
; port
++)
2704 hpriv
->ops
->read_preamp(hpriv
, port
, mmio
);
2706 rc
= hpriv
->ops
->reset_hc(hpriv
, mmio
, n_hc
);
2710 hpriv
->ops
->reset_flash(hpriv
, mmio
);
2711 hpriv
->ops
->reset_bus(host
, mmio
);
2712 hpriv
->ops
->enable_leds(hpriv
, mmio
);
2714 for (port
= 0; port
< host
->n_ports
; port
++) {
2715 if (IS_GEN_II(hpriv
)) {
2716 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
2718 u32 ifctl
= readl(port_mmio
+ SATA_INTERFACE_CTL
);
2719 ifctl
|= (1 << 7); /* enable gen2i speed */
2720 ifctl
= (ifctl
& 0xfff) | 0x9b1000; /* from chip spec */
2721 writelfl(ifctl
, port_mmio
+ SATA_INTERFACE_CTL
);
2724 hpriv
->ops
->phy_errata(hpriv
, mmio
, port
);
2727 for (port
= 0; port
< host
->n_ports
; port
++) {
2728 struct ata_port
*ap
= host
->ports
[port
];
2729 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
2731 mv_port_init(&ap
->ioaddr
, port_mmio
);
2734 if (HAS_PCI(host
)) {
2735 unsigned int offset
= port_mmio
- mmio
;
2736 ata_port_pbar_desc(ap
, MV_PRIMARY_BAR
, -1, "mmio");
2737 ata_port_pbar_desc(ap
, MV_PRIMARY_BAR
, offset
, "port");
2742 for (hc
= 0; hc
< n_hc
; hc
++) {
2743 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
2745 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2746 "(before clear)=0x%08x\n", hc
,
2747 readl(hc_mmio
+ HC_CFG_OFS
),
2748 readl(hc_mmio
+ HC_IRQ_CAUSE_OFS
));
2750 /* Clear any currently outstanding hc interrupt conditions */
2751 writelfl(0, hc_mmio
+ HC_IRQ_CAUSE_OFS
);
2754 if (HAS_PCI(host
)) {
2755 /* Clear any currently outstanding host interrupt conditions */
2756 writelfl(0, mmio
+ hpriv
->irq_cause_ofs
);
2758 /* and unmask interrupt generation for host regs */
2759 writelfl(hpriv
->unmask_all_irqs
, mmio
+ hpriv
->irq_mask_ofs
);
2760 if (IS_GEN_I(hpriv
))
2761 writelfl(~HC_MAIN_MASKED_IRQS_5
,
2762 hpriv
->main_mask_reg_addr
);
2764 writelfl(~HC_MAIN_MASKED_IRQS
,
2765 hpriv
->main_mask_reg_addr
);
2767 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2768 "PCI int cause/mask=0x%08x/0x%08x\n",
2769 readl(hpriv
->main_cause_reg_addr
),
2770 readl(hpriv
->main_mask_reg_addr
),
2771 readl(mmio
+ hpriv
->irq_cause_ofs
),
2772 readl(mmio
+ hpriv
->irq_mask_ofs
));
2774 writelfl(~HC_MAIN_MASKED_IRQS_SOC
,
2775 hpriv
->main_mask_reg_addr
);
2776 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2777 readl(hpriv
->main_cause_reg_addr
),
2778 readl(hpriv
->main_mask_reg_addr
));
2784 static int mv_create_dma_pools(struct mv_host_priv
*hpriv
, struct device
*dev
)
2786 hpriv
->crqb_pool
= dmam_pool_create("crqb_q", dev
, MV_CRQB_Q_SZ
,
2788 if (!hpriv
->crqb_pool
)
2791 hpriv
->crpb_pool
= dmam_pool_create("crpb_q", dev
, MV_CRPB_Q_SZ
,
2793 if (!hpriv
->crpb_pool
)
2796 hpriv
->sg_tbl_pool
= dmam_pool_create("sg_tbl", dev
, MV_SG_TBL_SZ
,
2798 if (!hpriv
->sg_tbl_pool
)
2805 * mv_platform_probe - handle a positive probe of an soc Marvell
2807 * @pdev: platform device found
2810 * Inherited from caller.
2812 static int mv_platform_probe(struct platform_device
*pdev
)
2814 static int printed_version
;
2815 const struct mv_sata_platform_data
*mv_platform_data
;
2816 const struct ata_port_info
*ppi
[] =
2817 { &mv_port_info
[chip_soc
], NULL
};
2818 struct ata_host
*host
;
2819 struct mv_host_priv
*hpriv
;
2820 struct resource
*res
;
2823 if (!printed_version
++)
2824 dev_printk(KERN_INFO
, &pdev
->dev
, "version " DRV_VERSION
"\n");
2827 * Simple resource validation ..
2829 if (unlikely(pdev
->num_resources
!= 2)) {
2830 dev_err(&pdev
->dev
, "invalid number of resources\n");
2835 * Get the register base first
2837 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2842 mv_platform_data
= pdev
->dev
.platform_data
;
2843 n_ports
= mv_platform_data
->n_ports
;
2845 host
= ata_host_alloc_pinfo(&pdev
->dev
, ppi
, n_ports
);
2846 hpriv
= devm_kzalloc(&pdev
->dev
, sizeof(*hpriv
), GFP_KERNEL
);
2848 if (!host
|| !hpriv
)
2850 host
->private_data
= hpriv
;
2851 hpriv
->n_ports
= n_ports
;
2854 hpriv
->base
= devm_ioremap(&pdev
->dev
, res
->start
,
2855 res
->end
- res
->start
+ 1);
2856 hpriv
->base
-= MV_SATAHC0_REG_BASE
;
2858 rc
= mv_create_dma_pools(hpriv
, &pdev
->dev
);
2862 /* initialize adapter */
2863 rc
= mv_init_host(host
, chip_soc
);
2867 dev_printk(KERN_INFO
, &pdev
->dev
,
2868 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH
,
2871 return ata_host_activate(host
, platform_get_irq(pdev
, 0), mv_interrupt
,
2872 IRQF_SHARED
, &mv6_sht
);
2877 * mv_platform_remove - unplug a platform interface
2878 * @pdev: platform device
2880 * A platform bus SATA device has been unplugged. Perform the needed
2881 * cleanup. Also called on module unload for any active devices.
2883 static int __devexit
mv_platform_remove(struct platform_device
*pdev
)
2885 struct device
*dev
= &pdev
->dev
;
2886 struct ata_host
*host
= dev_get_drvdata(dev
);
2888 ata_host_detach(host
);
2892 static struct platform_driver mv_platform_driver
= {
2893 .probe
= mv_platform_probe
,
2894 .remove
= __devexit_p(mv_platform_remove
),
2897 .owner
= THIS_MODULE
,
2903 static int mv_pci_init_one(struct pci_dev
*pdev
,
2904 const struct pci_device_id
*ent
);
2907 static struct pci_driver mv_pci_driver
= {
2909 .id_table
= mv_pci_tbl
,
2910 .probe
= mv_pci_init_one
,
2911 .remove
= ata_pci_remove_one
,
2917 static int msi
; /* Use PCI msi; either zero (off, default) or non-zero */
2920 /* move to PCI layer or libata core? */
2921 static int pci_go_64(struct pci_dev
*pdev
)
2925 if (!pci_set_dma_mask(pdev
, DMA_64BIT_MASK
)) {
2926 rc
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
2928 rc
= pci_set_consistent_dma_mask(pdev
, DMA_32BIT_MASK
);
2930 dev_printk(KERN_ERR
, &pdev
->dev
,
2931 "64-bit DMA enable failed\n");
2936 rc
= pci_set_dma_mask(pdev
, DMA_32BIT_MASK
);
2938 dev_printk(KERN_ERR
, &pdev
->dev
,
2939 "32-bit DMA enable failed\n");
2942 rc
= pci_set_consistent_dma_mask(pdev
, DMA_32BIT_MASK
);
2944 dev_printk(KERN_ERR
, &pdev
->dev
,
2945 "32-bit consistent DMA enable failed\n");
2954 * mv_print_info - Dump key info to kernel log for perusal.
2955 * @host: ATA host to print info about
2957 * FIXME: complete this.
2960 * Inherited from caller.
2962 static void mv_print_info(struct ata_host
*host
)
2964 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
2965 struct mv_host_priv
*hpriv
= host
->private_data
;
2967 const char *scc_s
, *gen
;
2969 /* Use this to determine the HW stepping of the chip so we know
2970 * what errata to workaround
2972 pci_read_config_byte(pdev
, PCI_CLASS_DEVICE
, &scc
);
2975 else if (scc
== 0x01)
2980 if (IS_GEN_I(hpriv
))
2982 else if (IS_GEN_II(hpriv
))
2984 else if (IS_GEN_IIE(hpriv
))
2989 dev_printk(KERN_INFO
, &pdev
->dev
,
2990 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2991 gen
, (unsigned)MV_MAX_Q_DEPTH
, host
->n_ports
,
2992 scc_s
, (MV_HP_FLAG_MSI
& hpriv
->hp_flags
) ? "MSI" : "INTx");
2996 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2997 * @pdev: PCI device found
2998 * @ent: PCI device ID entry for the matched host
3001 * Inherited from caller.
3003 static int mv_pci_init_one(struct pci_dev
*pdev
,
3004 const struct pci_device_id
*ent
)
3006 static int printed_version
;
3007 unsigned int board_idx
= (unsigned int)ent
->driver_data
;
3008 const struct ata_port_info
*ppi
[] = { &mv_port_info
[board_idx
], NULL
};
3009 struct ata_host
*host
;
3010 struct mv_host_priv
*hpriv
;
3013 if (!printed_version
++)
3014 dev_printk(KERN_INFO
, &pdev
->dev
, "version " DRV_VERSION
"\n");
3017 n_ports
= mv_get_hc_count(ppi
[0]->flags
) * MV_PORTS_PER_HC
;
3019 host
= ata_host_alloc_pinfo(&pdev
->dev
, ppi
, n_ports
);
3020 hpriv
= devm_kzalloc(&pdev
->dev
, sizeof(*hpriv
), GFP_KERNEL
);
3021 if (!host
|| !hpriv
)
3023 host
->private_data
= hpriv
;
3024 hpriv
->n_ports
= n_ports
;
3026 /* acquire resources */
3027 rc
= pcim_enable_device(pdev
);
3031 rc
= pcim_iomap_regions(pdev
, 1 << MV_PRIMARY_BAR
, DRV_NAME
);
3033 pcim_pin_device(pdev
);
3036 host
->iomap
= pcim_iomap_table(pdev
);
3037 hpriv
->base
= host
->iomap
[MV_PRIMARY_BAR
];
3039 rc
= pci_go_64(pdev
);
3043 rc
= mv_create_dma_pools(hpriv
, &pdev
->dev
);
3047 /* initialize adapter */
3048 rc
= mv_init_host(host
, board_idx
);
3052 /* Enable interrupts */
3053 if (msi
&& pci_enable_msi(pdev
))
3056 mv_dump_pci_cfg(pdev
, 0x68);
3057 mv_print_info(host
);
3059 pci_set_master(pdev
);
3060 pci_try_set_mwi(pdev
);
3061 return ata_host_activate(host
, pdev
->irq
, mv_interrupt
, IRQF_SHARED
,
3062 IS_GEN_I(hpriv
) ? &mv5_sht
: &mv6_sht
);
/* forward declarations for the module init/exit paths below */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3069 static int __init
mv_init(void)
3073 rc
= pci_register_driver(&mv_pci_driver
);
3077 rc
= platform_driver_register(&mv_platform_driver
);
3081 pci_unregister_driver(&mv_pci_driver
);
3086 static void __exit
mv_exit(void)
3089 pci_unregister_driver(&mv_pci_driver
);
3091 platform_driver_unregister(&mv_platform_driver
);
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");

/* see the 'msi' variable above: MSI is off by default */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);