/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.25"

enum {
	/* BARs are enumerated via pci_resource_start() */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_SATAHC0_REG_BASE	= 0x20000,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
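
	/* Checking the sizing above: the CRQB queue is 32 * 32B = 1024B,
	 * the CRPB queue is 32 * 8B = 256B, and the ePRD table is
	 * 176 * 16B = 2816B, summing to exactly 4096B (4KB) per port.
	 */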
74 | ||
75 | /* Our DMA boundary is determined by an ePRD being unable to handle | |
76 | * anything larger than 64KB | |
77 | */ | |
78 | MV_DMA_BOUNDARY = 0xffffU, | |
20f733e7 BR |
79 | |
80 | MV_PORTS_PER_HC = 4, | |
81 | /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ | |
82 | MV_PORT_HC_SHIFT = 2, | |
31961943 | 83 | /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */ |
20f733e7 BR |
84 | MV_PORT_MASK = 3, |
85 | ||
86 | /* Host Flags */ | |
87 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ | |
88 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ | |
31961943 BR |
89 | MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */ |
90 | MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | |
91 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), | |
92 | MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE | | |
93 | MV_FLAG_GLBL_SFT_RST), | |
20f733e7 BR |
94 | |
95 | chip_504x = 0, | |
96 | chip_508x = 1, | |
97 | chip_604x = 2, | |
98 | chip_608x = 3, | |
99 | ||
31961943 BR |

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
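
/* The register map in brief, as encoded by the constants above: the PCI
 * interface registers live in the first 64KB major region, SATAHC0 sits
 * at 0x20000 with each HC occupying its own 64KB region, and within an
 * HC region the 8KB arbiter block is followed by four 8KB per-port
 * register blocks.
 */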
218 | ||
31961943 BR |
219 | /* Command ReQuest Block: 32B */ |
220 | struct mv_crqb { | |
221 | u32 sg_addr; | |
222 | u32 sg_addr_hi; | |
223 | u16 ctrl_flags; | |
224 | u16 ata_cmd[11]; | |
225 | }; | |
20f733e7 | 226 | |
31961943 BR |
227 | /* Command ResPonse Block: 8B */ |
228 | struct mv_crpb { | |
229 | u16 id; | |
230 | u16 flags; | |
231 | u32 tmstmp; | |
20f733e7 BR |
232 | }; |
233 | ||
31961943 BR |
234 | /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ |
235 | struct mv_sg { | |
236 | u32 addr; | |
237 | u32 flags_size; | |
238 | u32 addr_hi; | |
239 | u32 reserved; | |
240 | }; | |
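
/* In each ePRD, flags_size carries the transfer byte count in its low
 * 16 bits (hence the 64KB MV_DMA_BOUNDARY) and EPRD_FLAG_END_OF_TBL in
 * bit 31; mv_fill_sg() below populates these fields.
 */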

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned		req_producer;	/* cp of req_in_ptr */
	unsigned		rsp_consumer;	/* cp of rsp_out_ptr */
	u32			pp_flags;
};

struct mv_host_priv {
	u32			hp_flags;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
	.ordered_flush		= 1,
};

static const struct ata_port_operations mv_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0,	/* 0x7f (udma0-6 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0,	/* 0x7f (udma0-6 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_ops,
	},
};

static struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_508x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
	{}			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
371 | ||
372 | /* | |
373 | * Functions | |
374 | */ | |
375 | ||
376 | static inline void writelfl(unsigned long data, void __iomem *addr) | |
377 | { | |
378 | writel(data, addr); | |
379 | (void) readl(addr); /* flush to avoid PCI posted write */ | |
380 | } | |
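
/* writelfl() (read as "writel + flush") is used below wherever a
 * register write must actually reach the chip before the driver
 * proceeds, e.g. when kicking the EDMA request queue in pointer in
 * mv_qc_issue().
 */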
381 | ||
20f733e7 BR |
382 | static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) |
383 | { | |
384 | return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); | |
385 | } | |
386 | ||
387 | static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) | |
388 | { | |
389 | return (mv_hc_base(base, port >> MV_PORT_HC_SHIFT) + | |
390 | MV_SATAHC_ARBTR_REG_SZ + | |
391 | ((port & MV_PORT_MASK) * MV_PORT_REG_SZ)); | |
392 | } | |
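
/* A worked example of the address math above, assuming an 8-port 608x
 * part: port 5 lives on HC 1 (5 >> 2) as hard port 1 (5 & 3), so its
 * registers start at base + 0x20000 + 0x10000 + 0x2000 + 0x2000, i.e.
 * base + 0x34000.
 */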
393 | ||
394 | static inline void __iomem *mv_ap_base(struct ata_port *ap) | |
395 | { | |
396 | return mv_port_base(ap->host_set->mmio_base, ap->port_no); | |
397 | } | |
398 | ||
31961943 BR |
399 | static inline int mv_get_hc_count(unsigned long hp_flags) |
400 | { | |
401 | return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1); | |
402 | } | |
403 | ||
404 | static void mv_irq_clear(struct ata_port *ap) | |
20f733e7 | 405 | { |
20f733e7 BR |
406 | } |
407 | ||
05b308e1 BR |
408 | /** |
409 | * mv_start_dma - Enable eDMA engine | |
410 | * @base: port base address | |
411 | * @pp: port private data | |
412 | * | |
413 | * Verify the local cache of the eDMA state is accurate with an | |
414 | * assert. | |
415 | * | |
416 | * LOCKING: | |
417 | * Inherited from caller. | |
418 | */ | |
afb0edd9 | 419 | static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) |
20f733e7 | 420 | { |
afb0edd9 BR |
421 | if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) { |
422 | writelfl(EDMA_EN, base + EDMA_CMD_OFS); | |
423 | pp->pp_flags |= MV_PP_FLAG_EDMA_EN; | |
424 | } | |
425 | assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); | |
20f733e7 BR |
426 | } |
427 | ||
05b308e1 BR |
428 | /** |
429 | * mv_stop_dma - Disable eDMA engine | |
430 | * @ap: ATA channel to manipulate | |
431 | * | |
432 | * Verify the local cache of the eDMA state is accurate with an | |
433 | * assert. | |
434 | * | |
435 | * LOCKING: | |
436 | * Inherited from caller. | |
437 | */ | |
31961943 | 438 | static void mv_stop_dma(struct ata_port *ap) |
20f733e7 | 439 | { |
31961943 BR |
440 | void __iomem *port_mmio = mv_ap_base(ap); |
441 | struct mv_port_priv *pp = ap->private_data; | |
31961943 BR |
442 | u32 reg; |
443 | int i; | |
444 | ||
afb0edd9 BR |
445 | if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) { |
446 | /* Disable EDMA if active. The disable bit auto clears. | |
31961943 | 447 | */ |
31961943 BR |
448 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); |
449 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | |
afb0edd9 BR |
450 | } else { |
451 | assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); | |
452 | } | |
31961943 BR |
453 | |
454 | /* now properly wait for the eDMA to stop */ | |
455 | for (i = 1000; i > 0; i--) { | |
456 | reg = readl(port_mmio + EDMA_CMD_OFS); | |
457 | if (!(EDMA_EN & reg)) { | |
458 | break; | |
459 | } | |
460 | udelay(100); | |
461 | } | |
462 | ||
31961943 BR |
463 | if (EDMA_EN & reg) { |
464 | printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id); | |
afb0edd9 | 465 | /* FIXME: Consider doing a reset here to recover */ |
31961943 | 466 | } |
20f733e7 BR |
467 | } |
468 | ||
8a70f8dc | 469 | #ifdef ATA_DEBUG |
31961943 | 470 | static void mv_dump_mem(void __iomem *start, unsigned bytes) |
20f733e7 | 471 | { |
31961943 BR |
472 | int b, w; |
473 | for (b = 0; b < bytes; ) { | |
474 | DPRINTK("%p: ", start + b); | |
475 | for (w = 0; b < bytes && w < 4; w++) { | |
476 | printk("%08x ",readl(start + b)); | |
477 | b += sizeof(u32); | |
478 | } | |
479 | printk("\n"); | |
480 | } | |
31961943 | 481 | } |
8a70f8dc JG |
482 | #endif |
483 | ||
31961943 BR |
484 | static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) |
485 | { | |
486 | #ifdef ATA_DEBUG | |
487 | int b, w; | |
488 | u32 dw; | |
489 | for (b = 0; b < bytes; ) { | |
490 | DPRINTK("%02x: ", b); | |
491 | for (w = 0; b < bytes && w < 4; w++) { | |
492 | (void) pci_read_config_dword(pdev,b,&dw); | |
493 | printk("%08x ",dw); | |
494 | b += sizeof(u32); | |
495 | } | |
496 | printk("\n"); | |
497 | } | |
498 | #endif | |
499 | } | |
500 | static void mv_dump_all_regs(void __iomem *mmio_base, int port, | |
501 | struct pci_dev *pdev) | |
502 | { | |
503 | #ifdef ATA_DEBUG | |
504 | void __iomem *hc_base = mv_hc_base(mmio_base, | |
505 | port >> MV_PORT_HC_SHIFT); | |
506 | void __iomem *port_base; | |
507 | int start_port, num_ports, p, start_hc, num_hcs, hc; | |
508 | ||
509 | if (0 > port) { | |
510 | start_hc = start_port = 0; | |
511 | num_ports = 8; /* shld be benign for 4 port devs */ | |
512 | num_hcs = 2; | |
513 | } else { | |
514 | start_hc = port >> MV_PORT_HC_SHIFT; | |
515 | start_port = port; | |
516 | num_ports = num_hcs = 1; | |
517 | } | |
518 | DPRINTK("All registers for port(s) %u-%u:\n", start_port, | |
519 | num_ports > 1 ? num_ports - 1 : start_port); | |
520 | ||
521 | if (NULL != pdev) { | |
522 | DPRINTK("PCI config space regs:\n"); | |
523 | mv_dump_pci_cfg(pdev, 0x68); | |
524 | } | |
525 | DPRINTK("PCI regs:\n"); | |
526 | mv_dump_mem(mmio_base+0xc00, 0x3c); | |
527 | mv_dump_mem(mmio_base+0xd00, 0x34); | |
528 | mv_dump_mem(mmio_base+0xf00, 0x4); | |
529 | mv_dump_mem(mmio_base+0x1d00, 0x6c); | |
530 | for (hc = start_hc; hc < start_hc + num_hcs; hc++) { | |
531 | hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT); | |
532 | DPRINTK("HC regs (HC %i):\n", hc); | |
533 | mv_dump_mem(hc_base, 0x1c); | |
534 | } | |
535 | for (p = start_port; p < start_port + num_ports; p++) { | |
536 | port_base = mv_port_base(mmio_base, p); | |
537 | DPRINTK("EDMA regs (port %i):\n",p); | |
538 | mv_dump_mem(port_base, 0x54); | |
539 | DPRINTK("SATA regs (port %i):\n",p); | |
540 | mv_dump_mem(port_base+0x300, 0x60); | |
541 | } | |
542 | #endif | |
20f733e7 BR |
543 | } |
544 | ||
545 | static unsigned int mv_scr_offset(unsigned int sc_reg_in) | |
546 | { | |
547 | unsigned int ofs; | |
548 | ||
549 | switch (sc_reg_in) { | |
550 | case SCR_STATUS: | |
551 | case SCR_CONTROL: | |
552 | case SCR_ERROR: | |
553 | ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32)); | |
554 | break; | |
555 | case SCR_ACTIVE: | |
556 | ofs = SATA_ACTIVE_OFS; /* active is not with the others */ | |
557 | break; | |
558 | default: | |
559 | ofs = 0xffffffffU; | |
560 | break; | |
561 | } | |
562 | return ofs; | |
563 | } | |
564 | ||
565 | static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in) | |
566 | { | |
567 | unsigned int ofs = mv_scr_offset(sc_reg_in); | |
568 | ||
569 | if (0xffffffffU != ofs) { | |
570 | return readl(mv_ap_base(ap) + ofs); | |
571 | } else { | |
572 | return (u32) ofs; | |
573 | } | |
574 | } | |
575 | ||
576 | static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) | |
577 | { | |
578 | unsigned int ofs = mv_scr_offset(sc_reg_in); | |
579 | ||
580 | if (0xffffffffU != ofs) { | |
581 | writelfl(val, mv_ap_base(ap) + ofs); | |
582 | } | |
583 | } | |
584 | ||
05b308e1 BR |
585 | /** |
586 | * mv_global_soft_reset - Perform the 6xxx global soft reset | |
587 | * @mmio_base: base address of the HBA | |
588 | * | |
589 | * This routine only applies to 6xxx parts. | |
590 | * | |
591 | * LOCKING: | |
592 | * Inherited from caller. | |
593 | */ | |
31961943 | 594 | static int mv_global_soft_reset(void __iomem *mmio_base) |
20f733e7 BR |
595 | { |
596 | void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; | |
597 | int i, rc = 0; | |
598 | u32 t; | |
599 | ||
20f733e7 BR |
600 | /* Following procedure defined in PCI "main command and status |
601 | * register" table. | |
602 | */ | |
603 | t = readl(reg); | |
604 | writel(t | STOP_PCI_MASTER, reg); | |
605 | ||
31961943 BR |
606 | for (i = 0; i < 1000; i++) { |
607 | udelay(1); | |
20f733e7 BR |
608 | t = readl(reg); |
609 | if (PCI_MASTER_EMPTY & t) { | |
610 | break; | |
611 | } | |
612 | } | |
613 | if (!(PCI_MASTER_EMPTY & t)) { | |
31961943 BR |
614 | printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); |
615 | rc = 1; | |
20f733e7 BR |
616 | goto done; |
617 | } | |
618 | ||
619 | /* set reset */ | |
620 | i = 5; | |
621 | do { | |
622 | writel(t | GLOB_SFT_RST, reg); | |
623 | t = readl(reg); | |
624 | udelay(1); | |
625 | } while (!(GLOB_SFT_RST & t) && (i-- > 0)); | |
626 | ||
627 | if (!(GLOB_SFT_RST & t)) { | |
31961943 BR |
628 | printk(KERN_ERR DRV_NAME ": can't set global reset\n"); |
629 | rc = 1; | |
20f733e7 BR |
630 | goto done; |
631 | } | |
632 | ||
31961943 | 633 | /* clear reset and *reenable the PCI master* (not mentioned in spec) */ |
20f733e7 BR |
634 | i = 5; |
635 | do { | |
31961943 | 636 | writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); |
20f733e7 BR |
637 | t = readl(reg); |
638 | udelay(1); | |
639 | } while ((GLOB_SFT_RST & t) && (i-- > 0)); | |
640 | ||
641 | if (GLOB_SFT_RST & t) { | |
31961943 BR |
642 | printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); |
643 | rc = 1; | |
20f733e7 | 644 | } |
31961943 | 645 | done: |
20f733e7 BR |
646 | return rc; |
647 | } | |
648 | ||
05b308e1 BR |
649 | /** |
650 | * mv_host_stop - Host specific cleanup/stop routine. | |
651 | * @host_set: host data structure | |
652 | * | |
653 | * Disable ints, cleanup host memory, call general purpose | |
654 | * host_stop. | |
655 | * | |
656 | * LOCKING: | |
657 | * Inherited from caller. | |
658 | */ | |
31961943 | 659 | static void mv_host_stop(struct ata_host_set *host_set) |
20f733e7 | 660 | { |
31961943 BR |
661 | struct mv_host_priv *hpriv = host_set->private_data; |
662 | struct pci_dev *pdev = to_pci_dev(host_set->dev); | |
663 | ||
664 | if (hpriv->hp_flags & MV_HP_FLAG_MSI) { | |
665 | pci_disable_msi(pdev); | |
666 | } else { | |
667 | pci_intx(pdev, 0); | |
668 | } | |
669 | kfree(hpriv); | |
670 | ata_host_stop(host_set); | |
671 | } | |
672 | ||
6037d6bb JG |
673 | static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev) |
674 | { | |
675 | dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); | |
676 | } | |
677 | ||
05b308e1 BR |
678 | /** |
679 | * mv_port_start - Port specific init/start routine. | |
680 | * @ap: ATA channel to manipulate | |
681 | * | |
682 | * Allocate and point to DMA memory, init port private memory, | |
683 | * zero indices. | |
684 | * | |
685 | * LOCKING: | |
686 | * Inherited from caller. | |
687 | */ | |
31961943 BR |
688 | static int mv_port_start(struct ata_port *ap) |
689 | { | |
690 | struct device *dev = ap->host_set->dev; | |
691 | struct mv_port_priv *pp; | |
692 | void __iomem *port_mmio = mv_ap_base(ap); | |
693 | void *mem; | |
694 | dma_addr_t mem_dma; | |
6037d6bb | 695 | int rc = -ENOMEM; |
31961943 BR |
696 | |
697 | pp = kmalloc(sizeof(*pp), GFP_KERNEL); | |
6037d6bb JG |
698 | if (!pp) |
699 | goto err_out; | |
31961943 BR |
700 | memset(pp, 0, sizeof(*pp)); |
701 | ||
702 | mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma, | |
703 | GFP_KERNEL); | |
6037d6bb JG |
704 | if (!mem) |
705 | goto err_out_pp; | |
31961943 BR |
706 | memset(mem, 0, MV_PORT_PRIV_DMA_SZ); |
707 | ||
6037d6bb JG |
708 | rc = ata_pad_alloc(ap, dev); |
709 | if (rc) | |
710 | goto err_out_priv; | |
711 | ||
31961943 BR |
712 | /* First item in chunk of DMA memory: |
713 | * 32-slot command request table (CRQB), 32 bytes each in size | |
714 | */ | |
715 | pp->crqb = mem; | |
716 | pp->crqb_dma = mem_dma; | |
717 | mem += MV_CRQB_Q_SZ; | |
718 | mem_dma += MV_CRQB_Q_SZ; | |
719 | ||
720 | /* Second item: | |
721 | * 32-slot command response table (CRPB), 8 bytes each in size | |
722 | */ | |
723 | pp->crpb = mem; | |
724 | pp->crpb_dma = mem_dma; | |
725 | mem += MV_CRPB_Q_SZ; | |
726 | mem_dma += MV_CRPB_Q_SZ; | |
727 | ||
728 | /* Third item: | |
729 | * Table of scatter-gather descriptors (ePRD), 16 bytes each | |
730 | */ | |
731 | pp->sg_tbl = mem; | |
732 | pp->sg_tbl_dma = mem_dma; | |
733 | ||
734 | writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | | |
735 | EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS); | |
736 | ||
737 | writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); | |
738 | writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, | |
739 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | |
740 | ||
741 | writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | |
742 | writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | |
743 | ||
744 | writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); | |
745 | writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, | |
746 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | |
747 | ||
748 | pp->req_producer = pp->rsp_consumer = 0; | |
749 | ||
750 | /* Don't turn on EDMA here...do it before DMA commands only. Else | |
751 | * we'll be unable to send non-data, PIO, etc due to restricted access | |
752 | * to shadow regs. | |
753 | */ | |
754 | ap->private_data = pp; | |
755 | return 0; | |
6037d6bb JG |
756 | |
757 | err_out_priv: | |
758 | mv_priv_free(pp, dev); | |
759 | err_out_pp: | |
760 | kfree(pp); | |
761 | err_out: | |
762 | return rc; | |
31961943 BR |
763 | } |
764 | ||
05b308e1 BR |
765 | /** |
766 | * mv_port_stop - Port specific cleanup/stop routine. | |
767 | * @ap: ATA channel to manipulate | |
768 | * | |
769 | * Stop DMA, cleanup port memory. | |
770 | * | |
771 | * LOCKING: | |
772 | * This routine uses the host_set lock to protect the DMA stop. | |
773 | */ | |
31961943 BR |
774 | static void mv_port_stop(struct ata_port *ap) |
775 | { | |
776 | struct device *dev = ap->host_set->dev; | |
777 | struct mv_port_priv *pp = ap->private_data; | |
afb0edd9 | 778 | unsigned long flags; |
31961943 | 779 | |
afb0edd9 | 780 | spin_lock_irqsave(&ap->host_set->lock, flags); |
31961943 | 781 | mv_stop_dma(ap); |
afb0edd9 | 782 | spin_unlock_irqrestore(&ap->host_set->lock, flags); |
31961943 BR |
783 | |
784 | ap->private_data = NULL; | |
6037d6bb JG |
785 | ata_pad_free(ap, dev); |
786 | mv_priv_free(pp, dev); | |
31961943 BR |
787 | kfree(pp); |
788 | } | |
789 | ||
05b308e1 BR |
790 | /** |
791 | * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries | |
792 | * @qc: queued command whose SG list to source from | |
793 | * | |
794 | * Populate the SG list and mark the last entry. | |
795 | * | |
796 | * LOCKING: | |
797 | * Inherited from caller. | |
798 | */ | |
31961943 BR |
799 | static void mv_fill_sg(struct ata_queued_cmd *qc) |
800 | { | |
801 | struct mv_port_priv *pp = qc->ap->private_data; | |
972c26bd JG |
802 | unsigned int i = 0; |
803 | struct scatterlist *sg; | |
31961943 | 804 | |
972c26bd | 805 | ata_for_each_sg(sg, qc) { |
31961943 BR |
806 | u32 sg_len; |
807 | dma_addr_t addr; | |
808 | ||
972c26bd JG |
809 | addr = sg_dma_address(sg); |
810 | sg_len = sg_dma_len(sg); | |
31961943 BR |
811 | |
812 | pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff); | |
813 | pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16); | |
814 | assert(0 == (sg_len & ~MV_DMA_BOUNDARY)); | |
815 | pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len); | |
972c26bd JG |
816 | if (ata_sg_is_last(sg, qc)) |
817 | pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); | |
818 | ||
819 | i++; | |
31961943 BR |
820 | } |
821 | } | |
822 | ||
823 | static inline unsigned mv_inc_q_index(unsigned *index) | |
824 | { | |
825 | *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK; | |
826 | return *index; | |
827 | } | |
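
/* Because MV_MAX_Q_DEPTH is a power of two, masking with
 * MV_MAX_Q_DEPTH_MASK (0x1f) gives a cheap modular increment:
 * an index of 31 wraps to 0 instead of reaching 32.
 */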
828 | ||
829 | static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) | |
830 | { | |
831 | *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | | |
832 | (last ? CRQB_CMD_LAST : 0); | |
833 | } | |
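
/* Resulting CRQB command word layout: bits [7:0] hold the register
 * data, bits [10:8] the shadow register address (the ATA_REG_* values
 * all fit in three bits), bits [12:11] the CRQB_CMD_CS control field,
 * and bit 15 marks the last word of the command.
 */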
834 | ||
05b308e1 BR |
835 | /** |
836 | * mv_qc_prep - Host specific command preparation. | |
837 | * @qc: queued command to prepare | |
838 | * | |
839 | * This routine simply redirects to the general purpose routine | |
840 | * if command is not DMA. Else, it handles prep of the CRQB | |
841 | * (command request block), does some sanity checking, and calls | |
842 | * the SG load routine. | |
843 | * | |
844 | * LOCKING: | |
845 | * Inherited from caller. | |
846 | */ | |
31961943 BR |
847 | static void mv_qc_prep(struct ata_queued_cmd *qc) |
848 | { | |
849 | struct ata_port *ap = qc->ap; | |
850 | struct mv_port_priv *pp = ap->private_data; | |
851 | u16 *cw; | |
852 | struct ata_taskfile *tf; | |
853 | u16 flags = 0; | |
854 | ||
855 | if (ATA_PROT_DMA != qc->tf.protocol) { | |
856 | return; | |
857 | } | |
20f733e7 | 858 | |
31961943 BR |
859 | /* the req producer index should be the same as we remember it */ |
860 | assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> | |
861 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | |
862 | pp->req_producer); | |
863 | ||
864 | /* Fill in command request block | |
865 | */ | |
866 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { | |
867 | flags |= CRQB_FLAG_READ; | |
868 | } | |
869 | assert(MV_MAX_Q_DEPTH > qc->tag); | |
870 | flags |= qc->tag << CRQB_TAG_SHIFT; | |
871 | ||
872 | pp->crqb[pp->req_producer].sg_addr = | |
873 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | |
874 | pp->crqb[pp->req_producer].sg_addr_hi = | |
875 | cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); | |
876 | pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags); | |
877 | ||
878 | cw = &pp->crqb[pp->req_producer].ata_cmd[0]; | |
879 | tf = &qc->tf; | |
880 | ||
881 | /* Sadly, the CRQB cannot accomodate all registers--there are | |
882 | * only 11 bytes...so we must pick and choose required | |
883 | * registers based on the command. So, we drop feature and | |
884 | * hob_feature for [RW] DMA commands, but they are needed for | |
885 | * NCQ. NCQ will drop hob_nsect. | |
20f733e7 | 886 | */ |
31961943 BR |
887 | switch (tf->command) { |
888 | case ATA_CMD_READ: | |
889 | case ATA_CMD_READ_EXT: | |
890 | case ATA_CMD_WRITE: | |
891 | case ATA_CMD_WRITE_EXT: | |
892 | mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); | |
893 | break; | |
894 | #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */ | |
895 | case ATA_CMD_FPDMA_READ: | |
896 | case ATA_CMD_FPDMA_WRITE: | |
897 | mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); | |
898 | mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); | |
899 | break; | |
900 | #endif /* FIXME: remove this line when NCQ added */ | |
901 | default: | |
902 | /* The only other commands EDMA supports in non-queued and | |
903 | * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none | |
904 | * of which are defined/used by Linux. If we get here, this | |
905 | * driver needs work. | |
906 | * | |
907 | * FIXME: modify libata to give qc_prep a return value and | |
908 | * return error here. | |
909 | */ | |
910 | BUG_ON(tf->command); | |
911 | break; | |
912 | } | |
913 | mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); | |
914 | mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); | |
915 | mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); | |
916 | mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); | |
917 | mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); | |
918 | mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); | |
919 | mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); | |
920 | mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); | |
921 | mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ | |
922 | ||
923 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { | |
924 | return; | |
925 | } | |
926 | mv_fill_sg(qc); | |
927 | } | |
928 | ||
05b308e1 BR |
929 | /** |
930 | * mv_qc_issue - Initiate a command to the host | |
931 | * @qc: queued command to start | |
932 | * | |
933 | * This routine simply redirects to the general purpose routine | |
934 | * if command is not DMA. Else, it sanity checks our local | |
935 | * caches of the request producer/consumer indices then enables | |
936 | * DMA and bumps the request producer index. | |
937 | * | |
938 | * LOCKING: | |
939 | * Inherited from caller. | |
940 | */ | |
31961943 BR |
941 | static int mv_qc_issue(struct ata_queued_cmd *qc) |
942 | { | |
943 | void __iomem *port_mmio = mv_ap_base(qc->ap); | |
944 | struct mv_port_priv *pp = qc->ap->private_data; | |
945 | u32 in_ptr; | |
946 | ||
947 | if (ATA_PROT_DMA != qc->tf.protocol) { | |
948 | /* We're about to send a non-EDMA capable command to the | |
949 | * port. Turn off EDMA so there won't be problems accessing | |
950 | * shadow block, etc registers. | |
951 | */ | |
952 | mv_stop_dma(qc->ap); | |
953 | return ata_qc_issue_prot(qc); | |
954 | } | |
955 | ||
956 | in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | |
957 | ||
958 | /* the req producer index should be the same as we remember it */ | |
959 | assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | |
960 | pp->req_producer); | |
961 | /* until we do queuing, the queue should be empty at this point */ | |
962 | assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | |
963 | ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> | |
964 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); | |
965 | ||
966 | mv_inc_q_index(&pp->req_producer); /* now incr producer index */ | |
967 | ||
afb0edd9 | 968 | mv_start_dma(port_mmio, pp); |
31961943 BR |
969 | |
970 | /* and write the request in pointer to kick the EDMA to life */ | |
971 | in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; | |
972 | in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT; | |
973 | writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | |
974 | ||
975 | return 0; | |
976 | } | |
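
/* The queue protocol in brief: software fills a CRQB slot and advances
 * the request queue in pointer above; the chip consumes the CRQB and,
 * on completion, writes a CRPB and advances the response queue in
 * pointer, which software acknowledges by bumping the response out
 * pointer in mv_get_crpb_status() below.
 */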
977 | ||
05b308e1 BR |
978 | /** |
979 | * mv_get_crpb_status - get status from most recently completed cmd | |
980 | * @ap: ATA channel to manipulate | |
981 | * | |
982 | * This routine is for use when the port is in DMA mode, when it | |
983 | * will be using the CRPB (command response block) method of | |
984 | * returning command completion information. We assert indices | |
985 | * are good, grab status, and bump the response consumer index to | |
986 | * prove that we're up to date. | |
987 | * | |
988 | * LOCKING: | |
989 | * Inherited from caller. | |
990 | */ | |
31961943 BR |
991 | static u8 mv_get_crpb_status(struct ata_port *ap) |
992 | { | |
993 | void __iomem *port_mmio = mv_ap_base(ap); | |
994 | struct mv_port_priv *pp = ap->private_data; | |
995 | u32 out_ptr; | |
996 | ||
997 | out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | |
998 | ||
999 | /* the response consumer index should be the same as we remember it */ | |
1000 | assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | |
1001 | pp->rsp_consumer); | |
1002 | ||
1003 | /* increment our consumer index... */ | |
1004 | pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); | |
1005 | ||
1006 | /* and, until we do NCQ, there should only be 1 CRPB waiting */ | |
1007 | assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> | |
1008 | EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | |
1009 | pp->rsp_consumer); | |
1010 | ||
1011 | /* write out our inc'd consumer index so EDMA knows we're caught up */ | |
1012 | out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; | |
1013 | out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT; | |
1014 | writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | |
1015 | ||
1016 | /* Return ATA status register for completed CRPB */ | |
1017 | return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT); | |
1018 | } | |
1019 | ||
05b308e1 BR |
1020 | /** |
1021 | * mv_err_intr - Handle error interrupts on the port | |
1022 | * @ap: ATA channel to manipulate | |
1023 | * | |
1024 | * In most cases, just clear the interrupt and move on. However, | |
1025 | * some cases require an eDMA reset, which is done right before | |
1026 | * the COMRESET in mv_phy_reset(). The SERR case requires a | |
1027 | * clear of pending errors in the SATA SERROR register. Finally, | |
1028 | * if the port disabled DMA, update our cached copy to match. | |
1029 | * | |
1030 | * LOCKING: | |
1031 | * Inherited from caller. | |
1032 | */ | |
31961943 BR |
1033 | static void mv_err_intr(struct ata_port *ap) |
1034 | { | |
1035 | void __iomem *port_mmio = mv_ap_base(ap); | |
1036 | u32 edma_err_cause, serr = 0; | |
20f733e7 BR |
1037 | |
1038 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | |
1039 | ||
1040 | if (EDMA_ERR_SERR & edma_err_cause) { | |
1041 | serr = scr_read(ap, SCR_ERROR); | |
1042 | scr_write_flush(ap, SCR_ERROR, serr); | |
1043 | } | |
afb0edd9 BR |
1044 | if (EDMA_ERR_SELF_DIS & edma_err_cause) { |
1045 | struct mv_port_priv *pp = ap->private_data; | |
1046 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | |
1047 | } | |
1048 | DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x " | |
1049 | "SERR: 0x%08x\n", ap->id, edma_err_cause, serr); | |
20f733e7 BR |
1050 | |
1051 | /* Clear EDMA now that SERR cleanup done */ | |
1052 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | |
1053 | ||
1054 | /* check for fatal here and recover if needed */ | |
1055 | if (EDMA_ERR_FATAL & edma_err_cause) { | |
1056 | mv_phy_reset(ap); | |
1057 | } | |
1058 | } | |
1059 | ||
05b308e1 BR |
1060 | /** |
1061 | * mv_host_intr - Handle all interrupts on the given host controller | |
1062 | * @host_set: host specific structure | |
1063 | * @relevant: port error bits relevant to this host controller | |
1064 | * @hc: which host controller we're to look at | |
1065 | * | |
1066 | * Read then write clear the HC interrupt status then walk each | |
1067 | * port connected to the HC and see if it needs servicing. Port | |
1068 | * success ints are reported in the HC interrupt status reg, the | |
1069 | * port error ints are reported in the higher level main | |
1070 | * interrupt status register and thus are passed in via the | |
1071 | * 'relevant' argument. | |
1072 | * | |
1073 | * LOCKING: | |
1074 | * Inherited from caller. | |
1075 | */ | |
20f733e7 BR |
1076 | static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, |
1077 | unsigned int hc) | |
1078 | { | |
1079 | void __iomem *mmio = host_set->mmio_base; | |
1080 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); | |
1081 | struct ata_port *ap; | |
1082 | struct ata_queued_cmd *qc; | |
1083 | u32 hc_irq_cause; | |
31961943 | 1084 | int shift, port, port0, hard_port, handled; |
a7dac447 | 1085 | unsigned int err_mask; |
31961943 | 1086 | u8 ata_status = 0; |
20f733e7 BR |
1087 | |
1088 | if (hc == 0) { | |
1089 | port0 = 0; | |
1090 | } else { | |
1091 | port0 = MV_PORTS_PER_HC; | |
1092 | } | |
1093 | ||
1094 | /* we'll need the HC success int register in most cases */ | |
1095 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); | |
1096 | if (hc_irq_cause) { | |
31961943 | 1097 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); |
20f733e7 BR |
1098 | } |
1099 | ||
1100 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", | |
1101 | hc,relevant,hc_irq_cause); | |
1102 | ||
1103 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { | |
1104 | ap = host_set->ports[port]; | |
1105 | hard_port = port & MV_PORT_MASK; /* range 0-3 */ | |
31961943 | 1106 | handled = 0; /* ensure ata_status is set if handled++ */ |
20f733e7 | 1107 | |
31961943 BR |
1108 | if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { |
1109 | /* new CRPB on the queue; just one at a time until NCQ | |
1110 | */ | |
1111 | ata_status = mv_get_crpb_status(ap); | |
1112 | handled++; | |
1113 | } else if ((DEV_IRQ << hard_port) & hc_irq_cause) { | |
1114 | /* received ATA IRQ; read the status reg to clear INTRQ | |
20f733e7 BR |
1115 | */ |
1116 | ata_status = readb((void __iomem *) | |
1117 | ap->ioaddr.status_addr); | |
31961943 | 1118 | handled++; |
20f733e7 BR |
1119 | } |
1120 | ||
a7dac447 JG |
1121 | err_mask = ac_err_mask(ata_status); |
1122 | ||
31961943 | 1123 | shift = port << 1; /* (port * 2) */ |
20f733e7 BR |
1124 | if (port >= MV_PORTS_PER_HC) { |
1125 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | |
1126 | } | |
1127 | if ((PORT0_ERR << shift) & relevant) { | |
1128 | mv_err_intr(ap); | |
a7dac447 | 1129 | err_mask |= AC_ERR_OTHER; |
31961943 | 1130 | handled++; |
20f733e7 BR |
1131 | } |
1132 | ||
31961943 | 1133 | if (handled && ap) { |
20f733e7 BR |
1134 | qc = ata_qc_from_tag(ap, ap->active_tag); |
1135 | if (NULL != qc) { | |
1136 | VPRINTK("port %u IRQ found for qc, " | |
1137 | "ata_status 0x%x\n", port,ata_status); | |
20f733e7 | 1138 | /* mark qc status appropriately */ |
a7dac447 | 1139 | ata_qc_complete(qc, err_mask); |
20f733e7 BR |
1140 | } |
1141 | } | |
1142 | } | |
1143 | VPRINTK("EXIT\n"); | |
1144 | } | |
1145 | ||
05b308e1 BR |
1146 | /** |
1147 | * mv_interrupt - | |
1148 | * @irq: unused | |
1149 | * @dev_instance: private data; in this case the host structure | |
1150 | * @regs: unused | |
1151 | * | |
1152 | * Read the read only register to determine if any host | |
1153 | * controllers have pending interrupts. If so, call lower level | |
1154 | * routine to handle. Also check for PCI errors which are only | |
1155 | * reported here. | |
1156 | * | |
1157 | * LOCKING: | |
1158 | * This routine holds the host_set lock while processing pending | |
1159 | * interrupts. | |
1160 | */ | |
20f733e7 BR |
1161 | static irqreturn_t mv_interrupt(int irq, void *dev_instance, |
1162 | struct pt_regs *regs) | |
1163 | { | |
1164 | struct ata_host_set *host_set = dev_instance; | |
1165 | unsigned int hc, handled = 0, n_hcs; | |
31961943 | 1166 | void __iomem *mmio = host_set->mmio_base; |
20f733e7 BR |
1167 | u32 irq_stat; |
1168 | ||
20f733e7 | 1169 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); |
20f733e7 BR |
1170 | |
1171 | /* check the cases where we either have nothing pending or have read | |
1172 | * a bogus register value which can indicate HW removal or PCI fault | |
1173 | */ | |
1174 | if (!irq_stat || (0xffffffffU == irq_stat)) { | |
1175 | return IRQ_NONE; | |
1176 | } | |
1177 | ||
31961943 | 1178 | n_hcs = mv_get_hc_count(host_set->ports[0]->flags); |
20f733e7 BR |
1179 | spin_lock(&host_set->lock); |
1180 | ||
1181 | for (hc = 0; hc < n_hcs; hc++) { | |
1182 | u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); | |
1183 | if (relevant) { | |
1184 | mv_host_intr(host_set, relevant, hc); | |
31961943 | 1185 | handled++; |
20f733e7 BR |
1186 | } |
1187 | } | |
1188 | if (PCI_ERR & irq_stat) { | |
31961943 BR |
1189 | printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", |
1190 | readl(mmio + PCI_IRQ_CAUSE_OFS)); | |
1191 | ||
afb0edd9 | 1192 | DPRINTK("All regs @ PCI error\n"); |
31961943 | 1193 | mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev)); |
20f733e7 | 1194 | |
31961943 BR |
1195 | writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); |
1196 | handled++; | |
1197 | } | |
20f733e7 BR |
1198 | spin_unlock(&host_set->lock); |
1199 | ||
1200 | return IRQ_RETVAL(handled); | |
1201 | } | |
1202 | ||
05b308e1 BR |
1203 | /** |
1204 | * mv_phy_reset - Perform eDMA reset followed by COMRESET | |
1205 | * @ap: ATA channel to manipulate | |
1206 | * | |
1207 | * Part of this is taken from __sata_phy_reset and modified to | |
1208 | * not sleep since this routine gets called from interrupt level. | |
1209 | * | |
1210 | * LOCKING: | |
1211 | * Inherited from caller. This is coded to safe to call at | |
1212 | * interrupt level, i.e. it does not sleep. | |
31961943 | 1213 | */ |
20f733e7 BR |
1214 | static void mv_phy_reset(struct ata_port *ap) |
1215 | { | |
1216 | void __iomem *port_mmio = mv_ap_base(ap); | |
1217 | struct ata_taskfile tf; | |
1218 | struct ata_device *dev = &ap->device[0]; | |
31961943 | 1219 | unsigned long timeout; |
20f733e7 BR |
1220 | |
1221 | VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); | |
1222 | ||
31961943 | 1223 | mv_stop_dma(ap); |
20f733e7 | 1224 | |
31961943 | 1225 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); |
20f733e7 BR |
1226 | udelay(25); /* allow reset propagation */ |
1227 | ||
1228 | /* Spec never mentions clearing the bit. Marvell's driver does | |
1229 | * clear the bit, however. | |
1230 | */ | |
31961943 | 1231 | writelfl(0, port_mmio + EDMA_CMD_OFS); |
20f733e7 | 1232 | |
31961943 BR |
1233 | VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " |
1234 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), | |
1235 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); | |
20f733e7 BR |
1236 | |
1237 | /* proceed to init communications via the scr_control reg */ | |
31961943 BR |
1238 | scr_write_flush(ap, SCR_CONTROL, 0x301); |
1239 | mdelay(1); | |
1240 | scr_write_flush(ap, SCR_CONTROL, 0x300); | |
1241 | timeout = jiffies + (HZ * 1); | |
1242 | do { | |
1243 | mdelay(10); | |
1244 | if ((scr_read(ap, SCR_STATUS) & 0xf) != 1) | |
1245 | break; | |
1246 | } while (time_before(jiffies, timeout)); | |
20f733e7 | 1247 | |
31961943 BR |
1248 | VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " |
1249 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), | |
1250 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); | |
1251 | ||
1252 | if (sata_dev_present(ap)) { | |
1253 | ata_port_probe(ap); | |
1254 | } else { | |
1255 | printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", | |
1256 | ap->id, scr_read(ap, SCR_STATUS)); | |
1257 | ata_port_disable(ap); | |
20f733e7 BR |
1258 | return; |
1259 | } | |
31961943 | 1260 | ap->cbl = ATA_CBL_SATA; |
20f733e7 BR |
1261 | |
1262 | tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); | |
1263 | tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); | |
1264 | tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr); | |
1265 | tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr); | |
1266 | ||
1267 | dev->class = ata_dev_classify(&tf); | |
1268 | if (!ata_dev_present(dev)) { | |
1269 | VPRINTK("Port disabled post-sig: No device present.\n"); | |
1270 | ata_port_disable(ap); | |
1271 | } | |
1272 | VPRINTK("EXIT\n"); | |
1273 | } | |
1274 | ||
05b308e1 BR |
1275 | /** |
1276 | * mv_eng_timeout - Routine called by libata when SCSI times out I/O | |
1277 | * @ap: ATA channel to manipulate | |
1278 | * | |
1279 | * Intent is to clear all pending error conditions, reset the | |
1280 | * chip/bus, fail the command, and move on. | |
1281 | * | |
1282 | * LOCKING: | |
1283 | * This routine holds the host_set lock while failing the command. | |
1284 | */ | |
31961943 BR |
1285 | static void mv_eng_timeout(struct ata_port *ap) |
1286 | { | |
1287 | struct ata_queued_cmd *qc; | |
1288 | unsigned long flags; | |
1289 | ||
1290 | printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); | |
1291 | DPRINTK("All regs @ start of eng_timeout\n"); | |
1292 | mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, | |
1293 | to_pci_dev(ap->host_set->dev)); | |
1294 | ||
1295 | qc = ata_qc_from_tag(ap, ap->active_tag); | |
1296 | printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", | |
1297 | ap->host_set->mmio_base, ap, qc, qc->scsicmd, | |
1298 | &qc->scsicmd->cmnd); | |
1299 | ||
1300 | mv_err_intr(ap); | |
1301 | mv_phy_reset(ap); | |
1302 | ||
1303 | if (!qc) { | |
1304 | printk(KERN_ERR "ata%u: BUG: timeout without command\n", | |
1305 | ap->id); | |
1306 | } else { | |
1307 | /* hack alert! We cannot use the supplied completion | |
1308 | * function from inside the ->eh_strategy_handler() thread. | |
1309 | * libata is the only user of ->eh_strategy_handler() in | |
1310 | * any kernel, so the default scsi_done() assumes it is | |
1311 | * not being called from the SCSI EH. | |
1312 | */ | |
1313 | spin_lock_irqsave(&ap->host_set->lock, flags); | |
1314 | qc->scsidone = scsi_finish_command; | |
a7dac447 | 1315 | ata_qc_complete(qc, AC_ERR_OTHER); |
31961943 BR |
1316 | spin_unlock_irqrestore(&ap->host_set->lock, flags); |
1317 | } | |
1318 | } | |
1319 | ||
05b308e1 BR |
1320 | /** |
1321 | * mv_port_init - Perform some early initialization on a single port. | |
1322 | * @port: libata data structure storing shadow register addresses | |
1323 | * @port_mmio: base address of the port | |
1324 | * | |
1325 | * Initialize shadow register mmio addresses, clear outstanding | |
1326 | * interrupts on the port, and unmask interrupts for the future | |
1327 | * start of the port. | |
1328 | * | |
1329 | * LOCKING: | |
1330 | * Inherited from caller. | |
1331 | */ | |
31961943 | 1332 | static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) |
20f733e7 | 1333 | { |
31961943 BR |
1334 | unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS; |
1335 | unsigned serr_ofs; | |
1336 | ||
1337 | /* PIO related setup | |
1338 | */ | |
1339 | port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); | |
1340 | port->error_addr = | |
1341 | port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); | |
1342 | port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); | |
1343 | port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); | |
1344 | port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); | |
1345 | port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); | |
1346 | port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); | |
1347 | port->status_addr = | |
1348 | port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); | |
1349 | /* special case: control/altstatus doesn't have ATA_REG_ address */ | |
1350 | port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; | |
1351 | ||
1352 | /* unused: */ | |
20f733e7 BR |
1353 | port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; |
1354 | ||
31961943 BR |
1355 | /* Clear any currently outstanding port interrupt conditions */ |
1356 | serr_ofs = mv_scr_offset(SCR_ERROR); | |
1357 | writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); | |
1358 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | |
1359 | ||
20f733e7 | 1360 | /* unmask all EDMA error interrupts */ |
31961943 | 1361 | writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS); |
20f733e7 BR |
1362 | |
1363 | VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", | |
31961943 BR |
1364 | readl(port_mmio + EDMA_CFG_OFS), |
1365 | readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), | |
1366 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); | |
20f733e7 BR |
1367 | } |
1368 | ||
05b308e1 BR |
1369 | /** |
1370 | * mv_host_init - Perform some early initialization of the host. | |
1371 | * @probe_ent: early data struct representing the host | |
1372 | * | |
1373 | * If possible, do an early global reset of the host. Then do | |
1374 | * our port init and clear/unmask all/relevant host interrupts. | |
1375 | * | |
1376 | * LOCKING: | |
1377 | * Inherited from caller. | |
1378 | */ | |
20f733e7 BR |
1379 | static int mv_host_init(struct ata_probe_ent *probe_ent) |
1380 | { | |
1381 | int rc = 0, n_hc, port, hc; | |
1382 | void __iomem *mmio = probe_ent->mmio_base; | |
1383 | void __iomem *port_mmio; | |
1384 | ||
31961943 BR |
1385 | if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) && |
1386 | mv_global_soft_reset(probe_ent->mmio_base)) { | |
20f733e7 BR |
1387 | rc = 1; |
1388 | goto done; | |
1389 | } | |
1390 | ||
1391 | n_hc = mv_get_hc_count(probe_ent->host_flags); | |
1392 | probe_ent->n_ports = MV_PORTS_PER_HC * n_hc; | |
1393 | ||
1394 | for (port = 0; port < probe_ent->n_ports; port++) { | |
1395 | port_mmio = mv_port_base(mmio, port); | |
31961943 | 1396 | mv_port_init(&probe_ent->port[port], port_mmio); |
20f733e7 BR |
1397 | } |
1398 | ||
1399 | for (hc = 0; hc < n_hc; hc++) { | |
31961943 BR |
1400 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); |
1401 | ||
1402 | VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " | |
1403 | "(before clear)=0x%08x\n", hc, | |
1404 | readl(hc_mmio + HC_CFG_OFS), | |
1405 | readl(hc_mmio + HC_IRQ_CAUSE_OFS)); | |
1406 | ||
1407 | /* Clear any currently outstanding hc interrupt conditions */ | |
1408 | writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); | |
20f733e7 BR |
1409 | } |
1410 | ||
31961943 BR |
1411 | /* Clear any currently outstanding host interrupt conditions */ |
1412 | writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); | |
1413 | ||
1414 | /* and unmask interrupt generation for host regs */ | |
1415 | writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); | |
1416 | writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); | |
20f733e7 BR |
1417 | |
1418 | VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " | |
1419 | "PCI int cause/mask=0x%08x/0x%08x\n", | |
1420 | readl(mmio + HC_MAIN_IRQ_CAUSE_OFS), | |
1421 | readl(mmio + HC_MAIN_IRQ_MASK_OFS), | |
1422 | readl(mmio + PCI_IRQ_CAUSE_OFS), | |
1423 | readl(mmio + PCI_IRQ_MASK_OFS)); | |
31961943 | 1424 | done: |
20f733e7 BR |
1425 | return rc; |
1426 | } | |
1427 | ||
05b308e1 BR |
1428 | /** |
1429 | * mv_print_info - Dump key info to kernel log for perusal. | |
1430 | * @probe_ent: early data struct representing the host | |
1431 | * | |
1432 | * FIXME: complete this. | |
1433 | * | |
1434 | * LOCKING: | |
1435 | * Inherited from caller. | |
1436 | */ | |
31961943 BR |
1437 | static void mv_print_info(struct ata_probe_ent *probe_ent) |
1438 | { | |
1439 | struct pci_dev *pdev = to_pci_dev(probe_ent->dev); | |
1440 | struct mv_host_priv *hpriv = probe_ent->private_data; | |
1441 | u8 rev_id, scc; | |
1442 | const char *scc_s; | |
1443 | ||
1444 | /* Use this to determine the HW stepping of the chip so we know | |
1445 | * what errata to workaround | |
1446 | */ | |
1447 | pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); | |
1448 | ||
1449 | pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); | |
1450 | if (scc == 0) | |
1451 | scc_s = "SCSI"; | |
1452 | else if (scc == 0x01) | |
1453 | scc_s = "RAID"; | |
1454 | else | |
1455 | scc_s = "unknown"; | |
1456 | ||
a9524a76 JG |
1457 | dev_printk(KERN_INFO, &pdev->dev, |
1458 | "%u slots %u ports %s mode IRQ via %s\n", | |
1459 | (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports, | |
31961943 BR |
1460 | scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); |
1461 | } | |
1462 | ||
05b308e1 BR |
1463 | /** |
1464 | * mv_init_one - handle a positive probe of a Marvell host | |
1465 | * @pdev: PCI device found | |
1466 | * @ent: PCI device ID entry for the matched host | |
1467 | * | |
1468 | * LOCKING: | |
1469 | * Inherited from caller. | |
1470 | */ | |
20f733e7 BR |
1471 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1472 | { | |
1473 | static int printed_version = 0; | |
1474 | struct ata_probe_ent *probe_ent = NULL; | |
1475 | struct mv_host_priv *hpriv; | |
1476 | unsigned int board_idx = (unsigned int)ent->driver_data; | |
1477 | void __iomem *mmio_base; | |
31961943 | 1478 | int pci_dev_busy = 0, rc; |
20f733e7 | 1479 | |
a9524a76 JG |
1480 | if (!printed_version++) |
1481 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | |
20f733e7 | 1482 | |
20f733e7 BR |
1483 | rc = pci_enable_device(pdev); |
1484 | if (rc) { | |
1485 | return rc; | |
1486 | } | |
1487 | ||
1488 | rc = pci_request_regions(pdev, DRV_NAME); | |
1489 | if (rc) { | |
1490 | pci_dev_busy = 1; | |
1491 | goto err_out; | |
1492 | } | |
1493 | ||
20f733e7 BR |
1494 | probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); |
1495 | if (probe_ent == NULL) { | |
1496 | rc = -ENOMEM; | |
1497 | goto err_out_regions; | |
1498 | } | |
1499 | ||
1500 | memset(probe_ent, 0, sizeof(*probe_ent)); | |
1501 | probe_ent->dev = pci_dev_to_dev(pdev); | |
1502 | INIT_LIST_HEAD(&probe_ent->node); | |
1503 | ||
31961943 | 1504 | mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0); |
20f733e7 BR |
1505 | if (mmio_base == NULL) { |
1506 | rc = -ENOMEM; | |
1507 | goto err_out_free_ent; | |
1508 | } | |
1509 | ||
1510 | hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL); | |
1511 | if (!hpriv) { | |
1512 | rc = -ENOMEM; | |
1513 | goto err_out_iounmap; | |
1514 | } | |
1515 | memset(hpriv, 0, sizeof(*hpriv)); | |
1516 | ||
1517 | probe_ent->sht = mv_port_info[board_idx].sht; | |
1518 | probe_ent->host_flags = mv_port_info[board_idx].host_flags; | |
1519 | probe_ent->pio_mask = mv_port_info[board_idx].pio_mask; | |
1520 | probe_ent->udma_mask = mv_port_info[board_idx].udma_mask; | |
1521 | probe_ent->port_ops = mv_port_info[board_idx].port_ops; | |
1522 | ||
1523 | probe_ent->irq = pdev->irq; | |
1524 | probe_ent->irq_flags = SA_SHIRQ; | |
1525 | probe_ent->mmio_base = mmio_base; | |
1526 | probe_ent->private_data = hpriv; | |
1527 | ||
1528 | /* initialize adapter */ | |
1529 | rc = mv_host_init(probe_ent); | |
1530 | if (rc) { | |
1531 | goto err_out_hpriv; | |
1532 | } | |
20f733e7 | 1533 | |
31961943 BR |
1534 | /* Enable interrupts */ |
1535 | if (pci_enable_msi(pdev) == 0) { | |
1536 | hpriv->hp_flags |= MV_HP_FLAG_MSI; | |
1537 | } else { | |
1538 | pci_intx(pdev, 1); | |
20f733e7 BR |
1539 | } |
1540 | ||
31961943 BR |
1541 | mv_dump_pci_cfg(pdev, 0x68); |
1542 | mv_print_info(probe_ent); | |
1543 | ||
1544 | if (ata_device_add(probe_ent) == 0) { | |
1545 | rc = -ENODEV; /* No devices discovered */ | |
1546 | goto err_out_dev_add; | |
1547 | } | |
20f733e7 | 1548 | |
31961943 | 1549 | kfree(probe_ent); |
20f733e7 BR |
1550 | return 0; |
1551 | ||
31961943 BR |
1552 | err_out_dev_add: |
1553 | if (MV_HP_FLAG_MSI & hpriv->hp_flags) { | |
1554 | pci_disable_msi(pdev); | |
1555 | } else { | |
1556 | pci_intx(pdev, 0); | |
1557 | } | |
1558 | err_out_hpriv: | |
20f733e7 | 1559 | kfree(hpriv); |
31961943 BR |
1560 | err_out_iounmap: |
1561 | pci_iounmap(pdev, mmio_base); | |
1562 | err_out_free_ent: | |
20f733e7 | 1563 | kfree(probe_ent); |
31961943 | 1564 | err_out_regions: |
20f733e7 | 1565 | pci_release_regions(pdev); |
31961943 | 1566 | err_out: |
20f733e7 BR |
1567 | if (!pci_dev_busy) { |
1568 | pci_disable_device(pdev); | |
1569 | } | |
1570 | ||
1571 | return rc; | |
1572 | } | |
1573 | ||
1574 | static int __init mv_init(void) | |
1575 | { | |
1576 | return pci_module_init(&mv_pci_driver); | |
1577 | } | |
1578 | ||
1579 | static void __exit mv_exit(void) | |
1580 | { | |
1581 | pci_unregister_driver(&mv_pci_driver); | |
1582 | } | |
1583 | ||
1584 | MODULE_AUTHOR("Brett Russ"); | |
1585 | MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); | |
1586 | MODULE_LICENSE("GPL"); | |
1587 | MODULE_DEVICE_TABLE(pci, mv_pci_tbl); | |
1588 | MODULE_VERSION(DRV_VERSION); | |
1589 | ||
1590 | module_init(mv_init); | |
1591 | module_exit(mv_exit); |