/*
 * sata_inic162x.c - Driver for Initio 162x SATA controllers
 *
 * Copyright 2006  SUSE Linux Products GmbH
 * Copyright 2006  Tejun Heo <teheo@novell.com>
 *
 * This file is released under GPL v2.
 *
 * This controller is eccentric and easily locks up if something isn't
 * right.  Documentation is available at Initio's website but it only
 * documents registers (not the programming model).
 *
 * - ATA disks work.
 * - Hotplug works.
 * - ATAPI read works but burning doesn't.  This thing is really
 *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
 *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
 *   my guest.
 * - Both STR and STD work.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define DRV_NAME	"sata_inic162x"
#define DRV_VERSION	"0.1"

enum {
	MMIO_BAR		= 5,

	NR_PORTS		= 2,

	HOST_CTL		= 0x7c,
	HOST_STAT		= 0x7e,
	HOST_IRQ_STAT		= 0xbc,
	HOST_IRQ_MASK		= 0xbe,

	PORT_SIZE		= 0x40,

	/* registers for ATA TF operation */
	PORT_TF			= 0x00,
	PORT_ALT_STAT		= 0x08,
	PORT_IRQ_STAT		= 0x09,
	PORT_IRQ_MASK		= 0x0a,
	PORT_PRD_CTL		= 0x0b,
	PORT_PRD_ADDR		= 0x0c,
	PORT_PRD_XFERLEN	= 0x10,

	/* IDMA register */
	PORT_IDMA_CTL		= 0x14,

	PORT_SCR		= 0x20,

	/* HOST_CTL bits */
	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
	HCTL_PWRDWN		= (1 << 12), /* power down PHYs */
	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
	HCTL_RPGSEL		= (1 << 15), /* register page select */

	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
				  HCTL_RPGSEL,

	/* HOST_IRQ_(STAT|MASK) bits */
	HIRQ_PORT0		= (1 << 0),
	HIRQ_PORT1		= (1 << 1),
	HIRQ_SOFT		= (1 << 14),
	HIRQ_GLOBAL		= (1 << 15), /* STAT only */

	/* PORT_IRQ_(STAT|MASK) bits */
	PIRQ_OFFLINE		= (1 << 0),  /* device unplugged */
	PIRQ_ONLINE		= (1 << 1),  /* device plugged */
	PIRQ_COMPLETE		= (1 << 2),  /* completion interrupt */
	PIRQ_FATAL		= (1 << 3),  /* fatal error */
	PIRQ_ATA		= (1 << 4),  /* ATA interrupt */
	PIRQ_REPLY		= (1 << 5),  /* reply FIFO not empty */
	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */

	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,

	PIRQ_MASK_DMA_READ	= PIRQ_REPLY | PIRQ_ATA,
	PIRQ_MASK_OTHER		= PIRQ_REPLY | PIRQ_COMPLETE,
	PIRQ_MASK_FREEZE	= 0xff,

	/* PORT_PRD_CTL bits */
	PRD_CTL_START		= (1 << 0),
	PRD_CTL_WR		= (1 << 3),
	PRD_CTL_DMAEN		= (1 << 7),  /* DMA enable */

	/* PORT_IDMA_CTL bits */
	IDMA_CTL_RST_ATA	= (1 << 2),  /* hardreset ATA bus */
	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */
};

struct inic_host_priv {
	u16	cached_hctl;
};

struct inic_port_priv {
	u8	dfl_prdctl;
	u8	cached_prdctl;
	u8	cached_pirq_mask;
};

static int inic_slave_config(struct scsi_device *sdev)
{
	/* This controller is braindamaged.  dma_boundary is 0xffff
	 * like others but it will lock up the whole machine HARD if
	 * a 65536-byte PRD entry is fed.  Reduce maximum segment size.
	 */
	blk_queue_max_segment_size(sdev->request_queue, 65536 - 512);

	return ata_scsi_slave_config(sdev);
}
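
/* Example: with the cap above, the largest scatter/gather segment the
 * block layer will build is 65536 - 512 = 65024 bytes, so a full 64k
 * PRD entry can never be generated no matter how requests are merged.
 */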

static struct scsi_host_template inic_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= inic_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const int scr_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 1,
	[SCR_CONTROL]	= 2,
};
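
/* Example: the probe code below points scr_addr at port_base + PORT_SCR
 * and the SCR registers are 32 bits wide, so SCR_CONTROL of port 1 is
 * accessed at mmio + 1 * PORT_SIZE + PORT_SCR + scr_map[SCR_CONTROL] * 4
 * = mmio + 0x40 + 0x20 + 0x08 = mmio + 0x68.
 */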

static void __iomem *inic_port_base(struct ata_port *ap)
{
	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
}

static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;

	writeb(mask, port_base + PORT_IRQ_MASK);
	pp->cached_pirq_mask = mask;
}

static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
	struct inic_port_priv *pp = ap->private_data;

	if (pp->cached_pirq_mask != mask)
		__inic_set_pirq_mask(ap, mask);
}

static void inic_reset_port(void __iomem *port_base)
{
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	u16 ctl;

	ctl = readw(idma_ctl);
	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);

	/* mask IRQ and assert reset */
	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
	readw(idma_ctl); /* flush */

	/* give it some time */
	msleep(1);

	/* release reset */
	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);

	/* clear irq */
	writeb(0xff, port_base + PORT_IRQ_STAT);

	/* reenable ATA IRQ, turn off IDMA mode */
	writew(ctl, idma_ctl);
}

static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg)
{
	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
	u32 val;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return 0xffffffffU;

	val = readl(scr_addr + scr_map[sc_reg] * 4);

	/* this controller has stuck DIAG.N, ignore it */
	if (sc_reg == SCR_ERROR)
		val &= ~SERR_PHYRDY_CHG;
	return val;
}

static void inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return;

	writel(val, scr_addr + scr_map[sc_reg] * 4);
}

/*
 * In TF mode, the inic162x is very similar to an SFF device.  TF
 * registers function the same.  The DMA engine behaves similarly,
 * using the same PRD format as BMDMA, but a different command register
 * and different interrupt and event notification methods are used.
 * The following inic_bmdma_*() functions do the impedance matching.
 */
static void inic_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);
	int rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* make sure device sees PRD table writes */
	wmb();

	/* load transfer length */
	writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);

	/* turn on DMA and specify data direction; PRD_CTL_WR apparently
	 * makes the engine write to memory, i.e. an ATA read
	 */
	pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
	if (!rw)
		pp->cached_prdctl |= PRD_CTL_WR;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

static void inic_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);

	/* start host DMA transaction */
	pp->cached_prdctl |= PRD_CTL_START;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
}

static void inic_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);

	/* stop DMA engine */
	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
}

static u8 inic_bmdma_status(struct ata_port *ap)
{
	/* event is already verified by the interrupt handler */
	return ATA_DMA_INTR;
}
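
/* For reference, a sketch of how the libata core of this era drives the
 * hooks above for ATA_PROT_DMA: ata_qc_issue_prot() loads the taskfile,
 * then calls ->bmdma_setup() (which issues the command here) and
 * ->bmdma_start(); on the completion interrupt, ata_host_intr() checks
 * ->bmdma_status() and stops the engine via ->bmdma_stop() before
 * completing the qc.
 */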

static void inic_irq_clear(struct ata_port *ap)
{
	/* noop */
}

static void inic_host_intr(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 irq_stat;

	/* fetch and clear irq */
	irq_stat = readb(port_base + PORT_IRQ_STAT);
	writeb(irq_stat, port_base + PORT_IRQ_STAT);

	if (likely(!(irq_stat & PIRQ_ERR))) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			ata_chk_status(ap);	/* clear ATA interrupt */
			return;
		}

		if (likely(ata_host_intr(ap, qc)))
			return;

		ata_chk_status(ap);	/* clear ATA interrupt */
		ata_port_printk(ap, KERN_WARNING, "unhandled "
				"interrupt, irq_stat=%x\n", irq_stat);
		return;
	}

	/* error */
	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);

	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	} else
		ata_port_abort(ap);
}

static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	u16 host_irq_stat;
	int i, handled = 0;

	host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);

	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];

		if (!(host_irq_stat & (HIRQ_PORT0 << i)))
			continue;

		if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
			inic_host_intr(ap);
			handled++;
		} else {
			if (ata_ratelimit())
				dev_printk(KERN_ERR, host->dev, "interrupt "
					   "from disabled port %d (0x%x)\n",
					   i, host_irq_stat);
		}
	}

	spin_unlock(&host->lock);

out:
	return IRQ_RETVAL(handled);
}

static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* The ATA IRQ doesn't wait for DMA transfer completion and
	 * vice versa.  Mask the IRQ selectively to detect command
	 * completion.  Without it, an ATA DMA read command can cause
	 * data corruption.
	 *
	 * Something similar might be needed for ATAPI writes.  I
	 * tried a lot of combinations but couldn't find the solution.
	 */
	if (qc->tf.protocol == ATA_PROT_DMA &&
	    !(qc->tf.flags & ATA_TFLAG_WRITE))
		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
	else
		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	/* Issuing a command to a yet-uninitialized port locks up the
	 * controller.  Most of the time, this happens for the first
	 * command after reset, which is an ATA or ATAPI IDENTIFY.
	 * Fast fail if stat is 0x7f or 0xff for those commands.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
		u8 stat = ata_chk_status(ap);
		if (stat == 0x7f || stat == 0xff)
			return AC_ERR_HSM;
	}

	return ata_qc_issue_prot(qc);
}
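
/* Worked example of the masking above: set bits in PORT_IRQ_MASK
 * disable the matching sources (inic_freeze() masks everything with
 * 0xff), so a DMA read runs with PIRQ_REPLY and PIRQ_ATA masked and
 * completes on PIRQ_COMPLETE, while every other command runs with
 * PIRQ_REPLY and PIRQ_COMPLETE masked and completes on the ATA
 * interrupt.  Error bits stay enabled in both cases.
 */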

static void inic_freeze(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	__inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);

	ata_chk_status(ap);
	writeb(0xff, port_base + PORT_IRQ_STAT);

	readb(port_base + PORT_IRQ_STAT); /* flush */
}

static void inic_thaw(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	ata_chk_status(ap);
	writeb(0xff, port_base + PORT_IRQ_STAT);

	__inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	readb(port_base + PORT_IRQ_STAT); /* flush */
}

/*
 * SRST and SControl hardreset don't give a valid signature on this
 * controller.  Only the controller-specific hardreset mechanism works.
 */
static int inic_hardreset(struct ata_port *ap, unsigned int *class,
			  unsigned long deadline)
{
	void __iomem *port_base = inic_port_base(ap);
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	u16 val;
	int rc;

	/* hammer it into sane state */
	inic_reset_port(port_base);

	val = readw(idma_ctl);
	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
	readw(idma_ctl);	/* flush */
	msleep(1);
	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);

	rc = sata_phy_resume(ap, timing, deadline);
	if (rc) {
		ata_port_printk(ap, KERN_WARNING, "failed to resume "
				"link after reset (errno=%d)\n", rc);
		return rc;
	}

	*class = ATA_DEV_NONE;
	if (ata_port_online(ap)) {
		struct ata_taskfile tf;

		/* wait a while before checking status */
		msleep(150);

		rc = ata_wait_ready(ap, deadline);
		/* link occupied, -ENODEV too is an error */
		if (rc) {
			ata_port_printk(ap, KERN_WARNING, "device not ready "
					"after hardreset (errno=%d)\n", rc);
			return rc;
		}

		ata_tf_read(ap, &tf);
		*class = ata_dev_classify(&tf);
		if (*class == ATA_DEV_UNKNOWN)
			*class = ATA_DEV_NONE;
	}

	return 0;
}

static void inic_error_handler(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;
	unsigned long flags;

	/* reset PIO HSM and stop DMA engine */
	inic_reset_port(port_base);

	spin_lock_irqsave(ap->lock, flags);
	ap->hsm_task_state = HSM_ST_IDLE;
	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
	spin_unlock_irqrestore(ap->lock, flags);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, ata_std_prereset, NULL, inic_hardreset,
		  ata_std_postreset);
}

static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		inic_reset_port(inic_port_base(qc->ap));
}

static void inic_dev_config(struct ata_device *dev)
{
	/* inic can only handle up to the LBA28 max sector count */
	if (dev->max_sectors > ATA_MAX_SECTORS)
		dev->max_sectors = ATA_MAX_SECTORS;
}
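
/* ATA_MAX_SECTORS is 256 in the libata of this vintage, the largest
 * transfer an LBA28 READ/WRITE command can encode (a sector count of
 * zero means 256), so the clamp above keeps each request within what
 * the controller can issue.
 */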

static void init_port(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	/* setup PRD address */
	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
}

static int inic_port_resume(struct ata_port *ap)
{
	init_port(ap);
	return 0;
}

static int inic_port_start(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp;
	u8 tmp;
	int rc;

	/* alloc and initialize private data */
	pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* default PRD_CTL value, DMAEN, WR and START off */
	tmp = readb(port_base + PORT_PRD_CTL);
	tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
	pp->dfl_prdctl = tmp;

	/* alloc resources; pp is devres-managed, don't free it on failure */
	rc = ata_port_start(ap);
	if (rc)
		return rc;

	init_port(ap);

	return 0;
}

static struct ata_port_operations inic_port_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.scr_read		= inic_scr_read,
	.scr_write		= inic_scr_write,

	.bmdma_setup		= inic_bmdma_setup,
	.bmdma_start		= inic_bmdma_start,
	.bmdma_stop		= inic_bmdma_stop,
	.bmdma_status		= inic_bmdma_status,

	.irq_clear		= inic_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= inic_qc_issue,
	.data_xfer		= ata_data_xfer,

	.freeze			= inic_freeze,
	.thaw			= inic_thaw,
	.error_handler		= inic_error_handler,
	.post_internal_cmd	= inic_post_internal_cmd,
	.dev_config		= inic_dev_config,

	.port_resume		= inic_port_resume,

	.port_start		= inic_port_start,
};

static struct ata_port_info inic_port_info = {
	/* For some reason, ATA_PROT_ATAPI is broken on this
	 * controller, and no, PIO_POLLING doesn't fix it.  It somehow
	 * manages to report the wrong ireason, and ignoring ireason
	 * results in a machine lockup.  Tell libata to always prefer
	 * DMA.
	 */
	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask	= 0x1f,	/* pio0-4 */
	.mwdma_mask	= 0x07,	/* mwdma0-2 */
	.udma_mask	= 0x7f,	/* udma0-6 */
	.port_ops	= &inic_port_ops
};

static int init_controller(void __iomem *mmio_base, u16 hctl)
{
	int i;
	u16 val;

	hctl &= ~HCTL_KNOWN_BITS;

	/* Soft reset whole controller.  Spec says reset duration is 3
	 * PCI clocks, be generous and give it 10ms.
	 */
	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
	readw(mmio_base + HOST_CTL); /* flush */

	for (i = 0; i < 10; i++) {
		msleep(1);
		val = readw(mmio_base + HOST_CTL);
		if (!(val & HCTL_SOFTRST))
			break;
	}

	if (val & HCTL_SOFTRST)
		return -EIO;

	/* mask all interrupts and reset ports */
	for (i = 0; i < NR_PORTS; i++) {
		void __iomem *port_base = mmio_base + i * PORT_SIZE;

		writeb(0xff, port_base + PORT_IRQ_MASK);
		inic_reset_port(port_base);
	}

	/* port IRQs are masked now, unmask global IRQ */
	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
	val = readw(mmio_base + HOST_IRQ_MASK);
	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
	writew(val, mmio_base + HOST_IRQ_MASK);

	return 0;
}
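
/* Note on BAR usage in the probe routine below: pcim_iomap_regions()
 * with mask 0x3f maps all six PCI BARs.  BARs 0 and 2 carry the TF
 * register blocks for ports 0 and 1, BARs 1 and 3 the corresponding
 * ctl/altstatus blocks, and BAR 5 (MMIO_BAR) the memory-mapped host
 * and IDMA registers; BAR 4 is mapped but unused here.
 */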

#ifdef CONFIG_PM
static int inic_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct inic_host_priv *hpriv = host->private_data;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = init_controller(mmio_base, hpriv->cached_hctl);
		if (rc)
			return rc;
	}

	ata_host_resume(host);

	return 0;
}
#endif

static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
	struct ata_host *host;
	struct inic_host_priv *hpriv;
	void __iomem * const *iomap;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = iomap = pcim_iomap_table(pdev);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_ioports *port = &host->ports[i]->ioaddr;
		void __iomem *port_base = iomap[MMIO_BAR] + i * PORT_SIZE;

		port->cmd_addr = iomap[2 * i];
		port->altstatus_addr =
		port->ctl_addr = (void __iomem *)
			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
		port->scr_addr = port_base + PORT_SCR;

		ata_std_ports(port);
	}

	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);

	/* set dma_mask; this device doesn't support 64-bit addressing */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit DMA enable failed\n");
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit consistent DMA enable failed\n");
		return rc;
	}

	rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to initialize controller\n");
		return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
				 &inic_sht);
}

static const struct pci_device_id inic_pci_tbl[] = {
	{ PCI_VDEVICE(INIT, 0x1622), },
	{ },
};

static struct pci_driver inic_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= inic_pci_tbl,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= inic_pci_device_resume,
#endif
	.probe		= inic_init_one,
	.remove		= ata_pci_remove_one,
};

static int __init inic_init(void)
{
	return pci_register_driver(&inic_pci_driver);
}

static void __exit inic_exit(void)
{
	pci_unregister_driver(&inic_pci_driver);
}

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(inic_init);
module_exit(inic_exit);