/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.25"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_SATAHC0_REG_BASE	= 0x20000,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary.  Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
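	/* Totals: 32 CRQBs * 32B = 1KB, 32 CRPBs * 8B = 256B, and
	 * 176 ePRDs * 16B = 2816B; together 4096B, one 4KB page per port.
	 */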

	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,
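	/* e.g. global port 5: HC = 5 >> 2 = 1, hard port = 5 & 3 = 1 */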

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_FLAG_GLBL_SFT_RST	= (1 << 28),	/* Global Soft Reset support */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
	MV_6XXX_FLAGS		= (MV_FLAG_IRQ_COALESCE |
				   MV_FLAG_GLBL_SFT_RST),

	chip_504x		= 0,
	chip_508x		= 1,
	chip_604x		= 2,
	chip_608x		= 3,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	u32			sg_addr;
	u32			sg_addr_hi;
	u16			ctrl_flags;
	u16			ata_cmd[11];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	u16			id;
	u16			flags;
	u32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	u32			addr;
	u32			flags_size;
	u32			addr_hi;
	u32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned		req_producer;	/* cp of req_in_ptr */
	unsigned		rsp_consumer;	/* cp of rsp_out_ptr */
	u32			pp_flags;
};

struct mv_host_priv {
	u32			hp_flags;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static Scsi_Host_Template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
	.ordered_flush		= 1,
};

static const struct ata_port_operations mv_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0,	/* 0x7f (udma0-6 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0,	/* 0x7f (udma0-6 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_ops,
	},
};

static struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_508x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
	{}			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * Functions
 */

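/* writelfl() pairs a write with a dummy read-back; the read forces the
 * write out of any PCI posting buffers, so sequences such as the EDMA
 * enable/disable and queue-pointer updates take effect in program order.
 */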
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return (mv_hc_base(base, port >> MV_PORT_HC_SHIFT) +
		MV_SATAHC_ARBTR_REG_SZ +
		((port & MV_PORT_MASK) * MV_PORT_REG_SZ));
}
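
/* Worked example: global port 5 resolves to mv_hc_base(base, 1)
 * = base + 0x20000 + 0x10000, then + 0x2000 (arbiter) + 1 * 0x2000,
 * i.e. base + 0x34000.
 */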

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
}

static inline int mv_get_hc_count(unsigned long hp_flags)
{
	return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with an
 * assert.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
}

/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with an
 * assert.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}
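	/* 1000 polls * 100us = up to 100ms of busy-waiting before giving up */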

	if (EDMA_EN & reg) {
		printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
		/* FIXME: Consider doing a reset here to recover */
	}
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
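
/* With libata's SCR numbering (SCR_STATUS 0, SCR_ERROR 1, SCR_CONTROL 2),
 * the first three land at 0x300, 0x304 and 0x308 respectively, matching
 * the "ctrl, err regs follow status" layout noted above.
 */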

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}

/**
 * mv_global_soft_reset - Perform the 6xxx global soft reset
 * @mmio_base: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_global_soft_reset(void __iomem *mmio_base)
{
	void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

/**
 * mv_host_stop - Host specific cleanup/stop routine.
 * @host_set: host data structure
 *
 * Disable ints, cleanup host memory, call general purpose
 * host_stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_stop(struct ata_host_set *host_set)
{
	struct mv_host_priv *hpriv = host_set->private_data;
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
	kfree(hpriv);
	ata_host_stop(host_set);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		return -ENOMEM;
	}
	memset(pp, 0, sizeof(*pp));

	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				 GFP_KERNEL);
	if (!mem) {
		kfree(pp);
		return -ENOMEM;
	}
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
		 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);

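	/* Program each queue base in two 32-bit halves; the double 16-bit
	 * shift extracts the upper half without undefined behavior when
	 * dma_addr_t is only 32 bits wide (a single >> 32 would be UB).
	 */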
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	pp->req_producer = pp->rsp_consumer = 0;

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host_set lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_port_priv *pp = ap->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	ap->private_data = NULL;
	/* the CRQB sits at the start of the coherent chunk, so free from it */
	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crqb, pp->crqb_dma);
	kfree(pp);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i;

	for (i = 0; i < qc->n_elem; i++) {
		u32 sg_len;
		dma_addr_t addr;

		addr = sg_dma_address(&qc->sg[i]);
		sg_len = sg_dma_len(&qc->sg[i]);

		pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
		pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
		pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
	}
	if (0 < qc->n_elem) {
		pp->sg_tbl[qc->n_elem - 1].flags_size |=
			cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	}
}

static inline unsigned mv_inc_q_index(unsigned *index)
{
	*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
	return *index;
}

static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
{
	*cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
}
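
/* Each 16-bit CRQB command word encodes one shadow-register write: the
 * register value in bits 7:0, the register address starting at bit 8,
 * plus what are presumably control-select bits (per the CRQB_CMD_CS name)
 * and, for the final word, the last-command flag.
 */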

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	u16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		return;
	}

	/* the req producer index should be the same as we remember it */
	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->req_producer);

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
		flags |= CRQB_FLAG_READ;
	}
	assert(MV_MAX_Q_DEPTH > qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	pp->crqb[pp->req_producer].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[pp->req_producer].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[pp->req_producer].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 command words...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
		return;
	}
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* the req producer index should be the same as we remember it */
	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->req_producer);
	/* until we do queuing, the queue should be empty at this point */
	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);
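
	/* The IN pointer register packs two fields: the request queue base
	 * in bits 31:10 and the producer index in bits 9:5 (five bits for
	 * 32 slots); bits 4:0 stay zero since each CRQB is 32 bytes.
	 */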
	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We assert indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 out_ptr;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* the response consumer index should be the same as we remember it */
	assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->rsp_consumer);

	/* increment our consumer index... */
	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
		 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->rsp_consumer);

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
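		/* SError bits are write-1-to-clear: writing back the value
		 * just read acks every pending error bit.
		 */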
		serr = scr_read(ap, SCR_ERROR);
		scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (EDMA_ERR_FATAL & edma_err_cause) {
		mv_phy_reset(ap);
	}
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host_set: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
			 unsigned int hc)
{
	void __iomem *mmio = host_set->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;
	u8 ata_status = 0;

	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause) {
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		ap = host_set->ports[port];
		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
			/* new CRPB on the queue; just one at a time until NCQ
			 */
			ata_status = mv_get_crpb_status(ap);
			handled++;
		} else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
			/* received ATA IRQ; read the status reg to clear INTRQ
			 */
			ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
			handled++;
		}

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
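		/* Each port owns an err/done bit pair in the main cause reg:
		 * ports 0-3 at bits 2p/2p+1, ports 4-7 bumped one further to
		 * skip bit 8 (e.g. port 5 lands on bits 11/12).
		 */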
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap);
			err_mask |= AC_ERR_OTHER;
			handled++;
		}

		if (handled && ap) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (NULL != qc) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				ata_qc_complete(qc, err_mask);
			}
		}
	}
	VPRINTK("EXIT\n");
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 * @regs: unused
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host_set lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host_set->mmio_base;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
	spin_lock(&host_set->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled++;
		}
	}
	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}

/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	mv_stop_dma(ap);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* proceed to init communications via the scr_control reg */
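	/* SControl 0x301 = IPM 0x3 (no partial/slumber transitions) with
	 * DET 0x1 to assert COMRESET; 0x300 keeps IPM but releases DET.
	 * The loop below then polls SStatus DET for up to a second.
	 */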
	scr_write_flush(ap, SCR_CONTROL, 0x301);
	mdelay(1);
	scr_write_flush(ap, SCR_CONTROL, 0x300);
	timeout = jiffies + (HZ * 1);
	do {
		mdelay(10);
		if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (sata_dev_present(ap)) {
		ata_port_probe(ap);
	} else {
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, scr_read(ap, SCR_STATUS));
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_present(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}
	VPRINTK("EXIT\n");
}

/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * LOCKING:
 * This routine holds the host_set lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;

	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n", ap->id);
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
			 to_pci_dev(ap->host_set->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc)		/* don't dereference qc before the NULL check below */
		printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
		       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
		       &qc->scsicmd->cmnd);

	mv_err_intr(ap);
	mv_phy_reset(ap);

	if (!qc) {
		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
		       ap->id);
	} else {
		/* hack alert!  We cannot use the supplied completion
		 * function from inside the ->eh_strategy_handler() thread.
		 * libata is the only user of ->eh_strategy_handler() in
		 * any kernel, so the default scsi_done() assumes it is
		 * not being called from the SCSI EH.
		 */
		spin_lock_irqsave(&ap->host_set->lock, flags);
		qc->scsidone = scsi_finish_command;
		ata_qc_complete(qc, AC_ERR_OTHER);
		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

/**
 * mv_host_init - Perform some early initialization of the host.
 * @probe_ent: early data struct representing the host
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_host_init(struct ata_probe_ent *probe_ent)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	void __iomem *port_mmio;

	if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) &&
	    mv_global_soft_reset(probe_ent->mmio_base)) {
		rc = 1;
		goto done;
	}

	n_hc = mv_get_hc_count(probe_ent->host_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++) {
		port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));
done:
	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @probe_ent: early data struct representing the host
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	struct mv_host_priv *hpriv = probe_ent->private_data;
	u8 rev_id, scc;
	const char *scc_s;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	printk(KERN_INFO DRV_NAME
	       "(%s) %u slots %u ports %s mode IRQ via %s\n",
	       pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_probe_ent *probe_ent = NULL;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int pci_dev_busy = 0, rc;

	if (!printed_version++) {
		printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n");
	}

	rc = pci_enable_device(pdev);
	if (rc) {
		return rc;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_host_init(probe_ent);
	if (rc) {
		goto err_out_hpriv;
	}

	/* Enable interrupts */
	if (pci_enable_msi(pdev) == 0) {
		hpriv->hp_flags |= MV_HP_FLAG_MSI;
	} else {
		pci_intx(pdev, 1);
	}

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(probe_ent);

	if (ata_device_add(probe_ent) == 0) {
		rc = -ENODEV;		/* No devices discovered */
		goto err_out_dev_add;
	}

	kfree(probe_ent);
	return 0;

err_out_dev_add:
	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy) {
		pci_disable_device(pdev);
	}

	return rc;
}

static int __init mv_init(void)
{
	return pci_module_init(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(mv_init);
module_exit(mv_exit);