1 /*
2 * libata-bmdma.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/pci.h>
38 #include <linux/libata.h>
39
40 #include "libata.h"
41
42 /**
43 * ata_tf_load_pio - send taskfile registers to host controller
44 * @ap: Port to which output is sent
45 * @tf: ATA taskfile register set
46 *
47 * Outputs ATA taskfile to standard ATA host controller.
48 *
49 * LOCKING:
50 * Inherited from caller.
51 */
52
53 static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
54 {
55 struct ata_ioports *ioaddr = &ap->ioaddr;
56 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
57
58 if (tf->ctl != ap->last_ctl) {
59 outb(tf->ctl, ioaddr->ctl_addr);
60 ap->last_ctl = tf->ctl;
61 ata_wait_idle(ap);
62 }
63
64 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
65 outb(tf->hob_feature, ioaddr->feature_addr);
66 outb(tf->hob_nsect, ioaddr->nsect_addr);
67 outb(tf->hob_lbal, ioaddr->lbal_addr);
68 outb(tf->hob_lbam, ioaddr->lbam_addr);
69 outb(tf->hob_lbah, ioaddr->lbah_addr);
70 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
71 tf->hob_feature,
72 tf->hob_nsect,
73 tf->hob_lbal,
74 tf->hob_lbam,
75 tf->hob_lbah);
76 }
77
78 if (is_addr) {
79 outb(tf->feature, ioaddr->feature_addr);
80 outb(tf->nsect, ioaddr->nsect_addr);
81 outb(tf->lbal, ioaddr->lbal_addr);
82 outb(tf->lbam, ioaddr->lbam_addr);
83 outb(tf->lbah, ioaddr->lbah_addr);
84 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
85 tf->feature,
86 tf->nsect,
87 tf->lbal,
88 tf->lbam,
89 tf->lbah);
90 }
91
92 if (tf->flags & ATA_TFLAG_DEVICE) {
93 outb(tf->device, ioaddr->device_addr);
94 VPRINTK("device 0x%X\n", tf->device);
95 }
96
97 ata_wait_idle(ap);
98 }
99
100 /**
101 * ata_tf_load_mmio - send taskfile registers to host controller
102 * @ap: Port to which output is sent
103 * @tf: ATA taskfile register set
104 *
105 * Outputs ATA taskfile to standard ATA host controller using MMIO.
106 *
107 * LOCKING:
108 * Inherited from caller.
109 */
110
111 static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
112 {
113 struct ata_ioports *ioaddr = &ap->ioaddr;
114 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
115
116 if (tf->ctl != ap->last_ctl) {
117 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
118 ap->last_ctl = tf->ctl;
119 ata_wait_idle(ap);
120 }
121
122 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
123 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
124 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
125 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
126 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
127 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
128 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
129 tf->hob_feature,
130 tf->hob_nsect,
131 tf->hob_lbal,
132 tf->hob_lbam,
133 tf->hob_lbah);
134 }
135
136 if (is_addr) {
137 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
138 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
139 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
140 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
141 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
142 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
143 tf->feature,
144 tf->nsect,
145 tf->lbal,
146 tf->lbam,
147 tf->lbah);
148 }
149
150 if (tf->flags & ATA_TFLAG_DEVICE) {
151 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
152 VPRINTK("device 0x%X\n", tf->device);
153 }
154
155 ata_wait_idle(ap);
156 }
157
158
159 /**
160 * ata_tf_load - send taskfile registers to host controller
161 * @ap: Port to which output is sent
162 * @tf: ATA taskfile register set
163 *
164 * Outputs ATA taskfile to standard ATA host controller using MMIO
165 * or PIO as indicated by the ATA_FLAG_MMIO flag.
166 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
167 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
168 * hob_lbal, hob_lbam, and hob_lbah.
169 *
170 * This function waits for idle (!BUSY and !DRQ) after writing
171 * registers. If the control register has a new value, this
172 * function also waits for idle after writing control and before
173 * writing the remaining registers.
174 *
175 * May be used as the tf_load() entry in ata_port_operations.
176 *
177 * LOCKING:
178 * Inherited from caller.
179 */
180 void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
181 {
182 if (ap->flags & ATA_FLAG_MMIO)
183 ata_tf_load_mmio(ap, tf);
184 else
185 ata_tf_load_pio(ap, tf);
186 }
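/* Illustrative sketch only ("foo" is a hypothetical driver prefix): a
 * driver with standard taskfile access plugs these helpers directly into
 * its ata_port_operations, as the kernel-doc above suggests, e.g.
 *
 *	static const struct ata_port_operations foo_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.check_status	= ata_check_status,
 *		.exec_command	= ata_exec_command,
 *		...
 *	};
 */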
187
188 /**
189 * ata_exec_command_pio - issue ATA command to host controller
190 * @ap: port to which command is being issued
191 * @tf: ATA taskfile register set
192 *
193 * Issues PIO write to ATA command register, with proper
194 * synchronization with interrupt handler / other threads.
195 *
196 * LOCKING:
197 * spin_lock_irqsave(host_set lock)
198 */
199
200 static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
201 {
202 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
203
204 outb(tf->command, ap->ioaddr.command_addr);
205 ata_pause(ap);
206 }
207
208
209 /**
210 * ata_exec_command_mmio - issue ATA command to host controller
211 * @ap: port to which command is being issued
212 * @tf: ATA taskfile register set
213 *
214 * Issues MMIO write to ATA command register, with proper
215 * synchronization with interrupt handler / other threads.
216 *
217 * FIXME: missing write posting for 400nS delay enforcement
218 *
219 * LOCKING:
220 * spin_lock_irqsave(host_set lock)
221 */
222
223 static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
224 {
225 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
226
227 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
228 ata_pause(ap);
229 }
230
231
232 /**
233 * ata_exec_command - issue ATA command to host controller
234 * @ap: port to which command is being issued
235 * @tf: ATA taskfile register set
236 *
237 * Issues PIO/MMIO write to ATA command register, with proper
238 * synchronization with interrupt handler / other threads.
239 *
240 * LOCKING:
241 * spin_lock_irqsave(host_set lock)
242 */
243 void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
244 {
245 if (ap->flags & ATA_FLAG_MMIO)
246 ata_exec_command_mmio(ap, tf);
247 else
248 ata_exec_command_pio(ap, tf);
249 }
250
251 /**
252 * ata_tf_read_pio - input device's ATA taskfile shadow registers
253 * @ap: Port from which input is read
254 * @tf: ATA taskfile register set for storing input
255 *
256 * Reads ATA taskfile registers for currently-selected device
257 * into @tf.
258 *
259 * LOCKING:
260 * Inherited from caller.
261 */
262
263 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
264 {
265 struct ata_ioports *ioaddr = &ap->ioaddr;
266
267 tf->command = ata_check_status(ap);
268 tf->feature = inb(ioaddr->error_addr);
269 tf->nsect = inb(ioaddr->nsect_addr);
270 tf->lbal = inb(ioaddr->lbal_addr);
271 tf->lbam = inb(ioaddr->lbam_addr);
272 tf->lbah = inb(ioaddr->lbah_addr);
273 tf->device = inb(ioaddr->device_addr);
274
275 if (tf->flags & ATA_TFLAG_LBA48) {
276 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
277 tf->hob_feature = inb(ioaddr->error_addr);
278 tf->hob_nsect = inb(ioaddr->nsect_addr);
279 tf->hob_lbal = inb(ioaddr->lbal_addr);
280 tf->hob_lbam = inb(ioaddr->lbam_addr);
281 tf->hob_lbah = inb(ioaddr->lbah_addr);
282 }
283 }
284
285 /**
286 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
287 * @ap: Port from which input is read
288 * @tf: ATA taskfile register set for storing input
289 *
290 * Reads ATA taskfile registers for currently-selected device
291 * into @tf via MMIO.
292 *
293 * LOCKING:
294 * Inherited from caller.
295 */
296
297 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
298 {
299 struct ata_ioports *ioaddr = &ap->ioaddr;
300
301 tf->command = ata_check_status(ap);
302 tf->feature = readb((void __iomem *)ioaddr->error_addr);
303 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
304 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
305 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
306 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
307 tf->device = readb((void __iomem *)ioaddr->device_addr);
308
309 if (tf->flags & ATA_TFLAG_LBA48) {
310 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
311 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
312 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
313 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
314 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
315 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
316 }
317 }
318
319
320 /**
321 * ata_tf_read - input device's ATA taskfile shadow registers
322 * @ap: Port from which input is read
323 * @tf: ATA taskfile register set for storing input
324 *
325 * Reads ATA taskfile registers for currently-selected device
326 * into @tf.
327 *
328 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
329 * is set, also reads the hob registers.
330 *
331 * May be used as the tf_read() entry in ata_port_operations.
332 *
333 * LOCKING:
334 * Inherited from caller.
335 */
336 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
337 {
338 if (ap->flags & ATA_FLAG_MMIO)
339 ata_tf_read_mmio(ap, tf);
340 else
341 ata_tf_read_pio(ap, tf);
342 }
343
344 /**
345 * ata_check_status_pio - Read device status reg & clear interrupt
346 * @ap: port where the device is
347 *
348 * Reads ATA taskfile status register for currently-selected device
349 * and returns its value. This also clears pending interrupts
350 * from this device.
351 *
352 * LOCKING:
353 * Inherited from caller.
354 */
355 static u8 ata_check_status_pio(struct ata_port *ap)
356 {
357 return inb(ap->ioaddr.status_addr);
358 }
359
360 /**
361 * ata_check_status_mmio - Read device status reg & clear interrupt
362 * @ap: port where the device is
363 *
364 * Reads ATA taskfile status register for currently-selected device
365 * via MMIO and returns its value. This also clears pending interrupts
366 * from this device.
367 *
368 * LOCKING:
369 * Inherited from caller.
370 */
371 static u8 ata_check_status_mmio(struct ata_port *ap)
372 {
373 return readb((void __iomem *) ap->ioaddr.status_addr);
374 }
375
376
377 /**
378 * ata_check_status - Read device status reg & clear interrupt
379 * @ap: port where the device is
380 *
381 * Reads ATA taskfile status register for currently-selected device
382 * and returns its value. This also clears pending interrupts
383 * from this device.
384 *
385 * May be used as the check_status() entry in ata_port_operations.
386 *
387 * LOCKING:
388 * Inherited from caller.
389 */
390 u8 ata_check_status(struct ata_port *ap)
391 {
392 if (ap->flags & ATA_FLAG_MMIO)
393 return ata_check_status_mmio(ap);
394 return ata_check_status_pio(ap);
395 }
396
397
398 /**
399 * ata_altstatus - Read device alternate status reg
400 * @ap: port where the device is
401 *
402 * Reads ATA taskfile alternate status register for
403 * currently-selected device and returns its value.
404 *
405 * Note: may NOT be used as the check_altstatus() entry in
406 * ata_port_operations.
407 *
408 * LOCKING:
409 * Inherited from caller.
410 */
411 u8 ata_altstatus(struct ata_port *ap)
412 {
413 if (ap->ops->check_altstatus)
414 return ap->ops->check_altstatus(ap);
415
416 if (ap->flags & ATA_FLAG_MMIO)
417 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
418 return inb(ap->ioaddr.altstatus_addr);
419 }
420
421 /**
422 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
423 * @qc: Info associated with this ATA transaction.
424 *
425 * LOCKING:
426 * spin_lock_irqsave(host_set lock)
427 */
428
429 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
430 {
431 struct ata_port *ap = qc->ap;
432 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
433 u8 dmactl;
434 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
435
436 /* load PRD table addr. */
437 mb(); /* make sure PRD table writes are visible to controller */
438 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
439
440 /* specify data direction, triple-check start bit is clear */
441 dmactl = readb(mmio + ATA_DMA_CMD);
442 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
443 if (!rw)
444 dmactl |= ATA_DMA_WR;
445 writeb(dmactl, mmio + ATA_DMA_CMD);
446
447 /* issue r/w command */
448 ap->ops->exec_command(ap, &qc->tf);
449 }
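/* Note on the register layout used above (standard SFF-8038i bus master
 * IDE block): ATA_DMA_CMD is the command register at offset 0,
 * ATA_DMA_STATUS the status register at offset 2, and ATA_DMA_TABLE_OFS
 * the 32-bit PRD table pointer at offset 4; the secondary channel's
 * block follows at offset 8. The ATA_DMA_WR bit describes the bus
 * master's direction (writes to memory), so it is set for device reads,
 * which is why the code above sets it when "rw" is clear.
 */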
450
451 /**
452 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
453 * @qc: Info associated with this ATA transaction.
454 *
455 * LOCKING:
456 * spin_lock_irqsave(host_set lock)
457 */
458
459 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
460 {
461 struct ata_port *ap = qc->ap;
462 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
463 u8 dmactl;
464
465 /* start host DMA transaction */
466 dmactl = readb(mmio + ATA_DMA_CMD);
467 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
468
469 /* Strictly, one may wish to issue a readb() here, to
470 * flush the mmio write. However, control also passes
471 * to the hardware at this point, and it will interrupt
472 * us when we are to resume control. So, in effect,
473 * we don't care when the mmio write flushes.
474 * Further, a read of the DMA status register _immediately_
475 * following the write may not be what certain flaky hardware
476 * is expecting, so I think it is best not to add a readb()
477 * without first testing all the MMIO ATA cards/mobos.
478 * Or maybe I'm just being paranoid.
479 */
480 }
481
482 /**
483 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
484 * @qc: Info associated with this ATA transaction.
485 *
486 * LOCKING:
487 * spin_lock_irqsave(host_set lock)
488 */
489
490 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
491 {
492 struct ata_port *ap = qc->ap;
493 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
494 u8 dmactl;
495
496 /* load PRD table addr. */
497 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
498
499 /* specify data direction, triple-check start bit is clear */
500 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
501 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
502 if (!rw)
503 dmactl |= ATA_DMA_WR;
504 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
505
506 /* issue r/w command */
507 ap->ops->exec_command(ap, &qc->tf);
508 }
509
510 /**
511 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
512 * @qc: Info associated with this ATA transaction.
513 *
514 * LOCKING:
515 * spin_lock_irqsave(host_set lock)
516 */
517
518 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
519 {
520 struct ata_port *ap = qc->ap;
521 u8 dmactl;
522
523 /* start host DMA transaction */
524 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
525 outb(dmactl | ATA_DMA_START,
526 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
527 }
528
529
530 /**
531 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
532 * @qc: Info associated with this ATA transaction.
533 *
534 * Writes the ATA_DMA_START flag to the DMA command register.
535 *
536 * May be used as the bmdma_start() entry in ata_port_operations.
537 *
538 * LOCKING:
539 * spin_lock_irqsave(host_set lock)
540 */
541 void ata_bmdma_start(struct ata_queued_cmd *qc)
542 {
543 if (qc->ap->flags & ATA_FLAG_MMIO)
544 ata_bmdma_start_mmio(qc);
545 else
546 ata_bmdma_start_pio(qc);
547 }
548
549
550 /**
551 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
552 * @qc: Info associated with this ATA transaction.
553 *
554 * Writes address of PRD table to device's PRD Table Address
555 * register, sets the DMA control register, and calls
556 * ops->exec_command() to start the transfer.
557 *
558 * May be used as the bmdma_setup() entry in ata_port_operations.
559 *
560 * LOCKING:
561 * spin_lock_irqsave(host_set lock)
562 */
563 void ata_bmdma_setup(struct ata_queued_cmd *qc)
564 {
565 if (qc->ap->flags & ATA_FLAG_MMIO)
566 ata_bmdma_setup_mmio(qc);
567 else
568 ata_bmdma_setup_pio(qc);
569 }
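/* Sketch of how the BMDMA helpers above fit together for one DMA command.
 * The libata core drives this sequence, not the low-level driver; the
 * ordering shown is illustrative only:
 *
 *	ap->ops->bmdma_setup(qc);	program PRD address and direction,
 *					then issue the R/W command
 *	ap->ops->bmdma_start(qc);	set ATA_DMA_START; transfer runs
 *	... device interrupt arrives ...
 *	host_stat = ap->ops->bmdma_status(ap);
 *	ap->ops->bmdma_stop(qc);	clear ATA_DMA_START
 *	ap->ops->irq_clear(ap);		ack the BMDMA interrupt bit
 */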
570
571
572 /**
573 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
574 * @ap: Port associated with this ATA transaction.
575 *
576 * Clear interrupt and error flags in DMA status register.
577 *
578 * May be used as the irq_clear() entry in ata_port_operations.
579 *
580 * LOCKING:
581 * spin_lock_irqsave(host_set lock)
582 */
583
584 void ata_bmdma_irq_clear(struct ata_port *ap)
585 {
586 if (!ap->ioaddr.bmdma_addr)
587 return;
588
589 if (ap->flags & ATA_FLAG_MMIO) {
590 void __iomem *mmio =
591 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
592 writeb(readb(mmio), mmio);
593 } else {
594 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
595 outb(inb(addr), addr);
596 }
597 }
598
599
600 /**
601 * ata_bmdma_status - Read PCI IDE BMDMA status
602 * @ap: Port associated with this ATA transaction.
603 *
604 * Read and return BMDMA status register.
605 *
606 * May be used as the bmdma_status() entry in ata_port_operations.
607 *
608 * LOCKING:
609 * spin_lock_irqsave(host_set lock)
610 */
611
612 u8 ata_bmdma_status(struct ata_port *ap)
613 {
614 u8 host_stat;
615 if (ap->flags & ATA_FLAG_MMIO) {
616 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
617 host_stat = readb(mmio + ATA_DMA_STATUS);
618 } else
619 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
620 return host_stat;
621 }
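/* Example check (illustrative): callers typically test the returned value
 * against the standard SFF-8038i status bits, e.g.
 *
 *	if (host_stat & ATA_DMA_ERR)	... host bus error ...
 *	if (host_stat & ATA_DMA_INTR)	... this channel raised the IRQ ...
 *
 * as ata_bmdma_drive_eh() below does with ATA_DMA_ERR.
 */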
622
623
624 /**
625 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
626 * @qc: Command we are ending DMA for
627 *
628 * Clears the ATA_DMA_START flag in the DMA command register.
629 *
630 * May be used as the bmdma_stop() entry in ata_port_operations.
631 *
632 * LOCKING:
633 * spin_lock_irqsave(host_set lock)
634 */
635
636 void ata_bmdma_stop(struct ata_queued_cmd *qc)
637 {
638 struct ata_port *ap = qc->ap;
639 if (ap->flags & ATA_FLAG_MMIO) {
640 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
641
642 /* clear start/stop bit */
643 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
644 mmio + ATA_DMA_CMD);
645 } else {
646 /* clear start/stop bit */
647 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
648 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
649 }
650
651 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
652 ata_altstatus(ap); /* dummy read */
653 }
654
655 /**
656 * ata_bmdma_freeze - Freeze BMDMA controller port
657 * @ap: port to freeze
658 *
659 * Freeze BMDMA controller port.
660 *
661 * LOCKING:
662 * Inherited from caller.
663 */
664 void ata_bmdma_freeze(struct ata_port *ap)
665 {
666 struct ata_ioports *ioaddr = &ap->ioaddr;
667
668 ap->ctl |= ATA_NIEN;
669 ap->last_ctl = ap->ctl;
670
671 if (ap->flags & ATA_FLAG_MMIO)
672 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
673 else
674 outb(ap->ctl, ioaddr->ctl_addr);
675 }
676
677 /**
678 * ata_bmdma_thaw - Thaw BMDMA controller port
679 * @ap: port to thaw
680 *
681 * Thaw BMDMA controller port.
682 *
683 * LOCKING:
684 * Inherited from caller.
685 */
686 void ata_bmdma_thaw(struct ata_port *ap)
687 {
688 /* clear & re-enable interrupts */
689 ata_chk_status(ap);
690 ap->ops->irq_clear(ap);
691 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
692 ata_irq_on(ap);
693 }
694
695 /**
696 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
697 * @ap: port to handle error for
698 * @prereset: prereset method (can be NULL)
699 * @softreset: softreset method (can be NULL)
700 * @hardreset: hardreset method (can be NULL)
701 * @postreset: postreset method (can be NULL)
702 *
703 * Handle error for ATA BMDMA controller. It can handle both
704 * PATA and SATA controllers. Many controllers should be able to
705 * use this EH as-is or with some added handling before and
706 * after.
707 *
708 * This function is intended to be used by low-level drivers
709 * when constructing their ->error_handler callback.
710 *
711 * LOCKING:
712 * Kernel thread context (may sleep)
713 */
714 void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
715 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
716 ata_postreset_fn_t postreset)
717 {
718 struct ata_eh_context *ehc = &ap->eh_context;
719 struct ata_queued_cmd *qc;
720 unsigned long flags;
721 int thaw = 0;
722
723 qc = __ata_qc_from_tag(ap, ap->active_tag);
724 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
725 qc = NULL;
726
727 /* reset PIO HSM and stop DMA engine */
728 spin_lock_irqsave(ap->lock, flags);
729
730 ap->hsm_task_state = HSM_ST_IDLE;
731
732 if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
733 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
734 u8 host_stat;
735
736 host_stat = ata_bmdma_status(ap);
737
738 ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
739
740 /* BMDMA controllers indicate host bus error by
741 * setting DMA_ERR bit and timing out. As it wasn't
742 * really a timeout event, adjust error mask and
743 * cancel frozen state.
744 */
745 if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
746 qc->err_mask = AC_ERR_HOST_BUS;
747 thaw = 1;
748 }
749
750 ap->ops->bmdma_stop(qc);
751 }
752
753 ata_altstatus(ap);
754 ata_chk_status(ap);
755 ap->ops->irq_clear(ap);
756
757 spin_unlock_irqrestore(ap->lock, flags);
758
759 if (thaw)
760 ata_eh_thaw_port(ap);
761
762 /* PIO and DMA engines have been stopped, perform recovery */
763 ata_do_eh(ap, prereset, softreset, hardreset, postreset);
764 }
765
766 /**
767 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
768 * @ap: port to handle error for
769 *
770 * Stock error handler for BMDMA controller.
771 *
772 * LOCKING:
773 * Kernel thread context (may sleep)
774 */
775 void ata_bmdma_error_handler(struct ata_port *ap)
776 {
777 ata_reset_fn_t hardreset;
778
779 hardreset = NULL;
780 if (sata_scr_valid(ap))
781 hardreset = sata_std_hardreset;
782
783 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
784 ata_std_postreset);
785 }
786
787 /**
788 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
789 * BMDMA controller
790 * @qc: internal command to clean up
791 *
792 * LOCKING:
793 * Kernel thread context (may sleep)
794 */
795 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
796 {
797 ata_bmdma_stop(qc);
798 }
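/* Illustrative sketch only ("foo" is a hypothetical driver prefix): the
 * stock EH helpers above are meant to be wired into a driver's
 * ata_port_operations as its error-handling hooks, e.g.
 *
 *	static const struct ata_port_operations foo_ops = {
 *		...
 *		.freeze			= ata_bmdma_freeze,
 *		.thaw			= ata_bmdma_thaw,
 *		.error_handler		= ata_bmdma_error_handler,
 *		.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 *		...
 *	};
 */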
799
800 #ifdef CONFIG_PCI
801 static struct ata_probe_ent *
802 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
803 {
804 struct ata_probe_ent *probe_ent;
805
806 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
807 if (!probe_ent) {
808 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
809 kobject_name(&(dev->kobj)));
810 return NULL;
811 }
812
813 INIT_LIST_HEAD(&probe_ent->node);
814 probe_ent->dev = dev;
815
816 probe_ent->sht = port->sht;
817 probe_ent->host_flags = port->host_flags;
818 probe_ent->pio_mask = port->pio_mask;
819 probe_ent->mwdma_mask = port->mwdma_mask;
820 probe_ent->udma_mask = port->udma_mask;
821 probe_ent->port_ops = port->port_ops;
822
823 return probe_ent;
824 }
825
826
827 /**
828 * ata_pci_init_native_mode - Initialize native-mode driver
829 * @pdev: pci device to be initialized
830 * @port: array[2] of pointers to port info structures.
831 * @ports: bitmap of ports present
832 *
833 * Utility function which allocates and initializes an
834 * ata_probe_ent structure for a standard dual-port
835 * PIO-based IDE controller. The returned ata_probe_ent
836 * structure can be passed to ata_device_add(), after which it
837 * should be freed with kfree().
838 *
839 * The caller need only pass the address of the primary port; the
840 * secondary will be deduced automatically. If the device has
841 * non-standard secondary port mappings, this function can be called
842 * twice, once for each interface.
843 */
844
845 struct ata_probe_ent *
846 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
847 {
848 struct ata_probe_ent *probe_ent =
849 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
850 int p = 0;
851 unsigned long bmdma;
852
853 if (!probe_ent)
854 return NULL;
855
856 probe_ent->irq = pdev->irq;
857 probe_ent->irq_flags = SA_SHIRQ;
858 probe_ent->private_data = port[0]->private_data;
859
860 if (ports & ATA_PORT_PRIMARY) {
861 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
862 probe_ent->port[p].altstatus_addr =
863 probe_ent->port[p].ctl_addr =
864 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
865 bmdma = pci_resource_start(pdev, 4);
866 if (bmdma) {
867 if (inb(bmdma + 2) & 0x80)
868 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
869 probe_ent->port[p].bmdma_addr = bmdma;
870 }
871 ata_std_ports(&probe_ent->port[p]);
872 p++;
873 }
874
875 if (ports & ATA_PORT_SECONDARY) {
876 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
877 probe_ent->port[p].altstatus_addr =
878 probe_ent->port[p].ctl_addr =
879 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
880 bmdma = pci_resource_start(pdev, 4);
881 if (bmdma) {
882 bmdma += 8;
883 if (inb(bmdma + 2) & 0x80)
884 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
885 probe_ent->port[p].bmdma_addr = bmdma;
886 }
887 ata_std_ports(&probe_ent->port[p]);
888 p++;
889 }
890
891 probe_ent->n_ports = p;
892 return probe_ent;
893 }
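/* Illustrative sketch only; the "foo" names are hypothetical. A
 * native-mode driver's probe routine would typically use the helper as
 * described in the kernel-doc above:
 *
 *	struct ata_port_info *ppi[2] = { &foo_port_info, &foo_port_info };
 *	struct ata_probe_ent *probe_ent;
 *
 *	probe_ent = ata_pci_init_native_mode(pdev, ppi,
 *				ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 *	if (!probe_ent)
 *		return -ENOMEM;
 *	...
 *	ata_device_add(probe_ent);
 *	kfree(probe_ent);
 */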
894
895
896 static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
897 struct ata_port_info *port, int port_num)
898 {
899 struct ata_probe_ent *probe_ent;
900 unsigned long bmdma;
901
902 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
903 if (!probe_ent)
904 return NULL;
905
906 probe_ent->legacy_mode = 1;
907 probe_ent->n_ports = 1;
908 probe_ent->hard_port_no = port_num;
909 probe_ent->private_data = port->private_data;
910
911 switch(port_num)
912 {
913 case 0:
914 probe_ent->irq = 14;
915 probe_ent->port[0].cmd_addr = 0x1f0;
916 probe_ent->port[0].altstatus_addr =
917 probe_ent->port[0].ctl_addr = 0x3f6;
918 break;
919 case 1:
920 probe_ent->irq = 15;
921 probe_ent->port[0].cmd_addr = 0x170;
922 probe_ent->port[0].altstatus_addr =
923 probe_ent->port[0].ctl_addr = 0x376;
924 break;
925 }
926
927 bmdma = pci_resource_start(pdev, 4);
928 if (bmdma != 0) {
929 bmdma += 8 * port_num;
930 probe_ent->port[0].bmdma_addr = bmdma;
931 if (inb(bmdma + 2) & 0x80)
932 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
933 }
934 ata_std_ports(&probe_ent->port[0]);
935
936 return probe_ent;
937 }
938
939
940 /**
941 * ata_pci_init_one - Initialize/register PCI IDE host controller
942 * @pdev: Controller to be initialized
943 * @port_info: Information from low-level host driver
944 * @n_ports: Number of ports attached to host controller
945 *
946 * This is a helper function which can be called from a driver's
947 * xxx_init_one() probe function if the hardware uses traditional
948 * IDE taskfile registers.
949 *
950 * This function calls pci_enable_device(), reserves its register
951 * regions, sets the dma mask, enables bus master mode, and calls
952 * ata_device_add().
953 *
954 * LOCKING:
955 * Inherited from PCI layer (may sleep).
956 *
957 * RETURNS:
958 * Zero on success, negative errno-based value on error.
959 */
960
961 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
962 unsigned int n_ports)
963 {
964 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
965 struct ata_port_info *port[2];
966 u8 tmp8, mask;
967 unsigned int legacy_mode = 0;
968 int disable_dev_on_err = 1;
969 int rc;
970
971 DPRINTK("ENTER\n");
972
973 port[0] = port_info[0];
974 if (n_ports > 1)
975 port[1] = port_info[1];
976 else
977 port[1] = port[0];
978
979 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
980 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
981 /* TODO: What if one channel is in native mode ... */
982 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
983 mask = (1 << 2) | (1 << 0);
984 if ((tmp8 & mask) != mask)
985 legacy_mode = (1 << 3);
986 }
987
988 /* FIXME... */
989 if ((!legacy_mode) && (n_ports > 2)) {
990 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
991 n_ports = 2;
992 /* For now */
993 }
994
995 /* FIXME: Really for ATA it isn't safe because the device may be
996 multi-purpose and we want to leave it alone if it was already
997 enabled. Secondly for shared use as Arjan says we want refcounting
998
999 Checking dev->is_enabled is insufficient as this is not set at
1000 boot for the primary video which is BIOS enabled
1001 */
1002
1003 rc = pci_enable_device(pdev);
1004 if (rc)
1005 return rc;
1006
1007 rc = pci_request_regions(pdev, DRV_NAME);
1008 if (rc) {
1009 disable_dev_on_err = 0;
1010 goto err_out;
1011 }
1012
1013 /* FIXME: Should use platform specific mappers for legacy port ranges */
1014 if (legacy_mode) {
1015 if (!request_region(0x1f0, 8, "libata")) {
1016 struct resource *conflict, res;
1017 res.start = 0x1f0;
1018 res.end = 0x1f0 + 8 - 1;
1019 conflict = ____request_resource(&ioport_resource, &res);
1020 if (!strcmp(conflict->name, "libata"))
1021 legacy_mode |= (1 << 0);
1022 else {
1023 disable_dev_on_err = 0;
1024 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
1025 }
1026 } else
1027 legacy_mode |= (1 << 0);
1028
1029 if (!request_region(0x170, 8, "libata")) {
1030 struct resource *conflict, res;
1031 res.start = 0x170;
1032 res.end = 0x170 + 8 - 1;
1033 conflict = ____request_resource(&ioport_resource, &res);
1034 if (!strcmp(conflict->name, "libata"))
1035 legacy_mode |= (1 << 1);
1036 else {
1037 disable_dev_on_err = 0;
1038 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
1039 }
1040 } else
1041 legacy_mode |= (1 << 1);
1042 }
1043
1044 /* we have legacy mode, but all ports are unavailable */
1045 if (legacy_mode == (1 << 3)) {
1046 rc = -EBUSY;
1047 goto err_out_regions;
1048 }
1049
1050 /* FIXME: If we get no DMA mask we should fall back to PIO */
1051 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1052 if (rc)
1053 goto err_out_regions;
1054 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1055 if (rc)
1056 goto err_out_regions;
1057
1058 if (legacy_mode) {
1059 if (legacy_mode & (1 << 0))
1060 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
1061 if (legacy_mode & (1 << 1))
1062 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
1063 } else {
1064 if (n_ports == 2)
1065 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1066 else
1067 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
1068 }
1069 if (!probe_ent && !probe_ent2) {
1070 rc = -ENOMEM;
1071 goto err_out_regions;
1072 }
1073
1074 pci_set_master(pdev);
1075
1076 /* FIXME: check ata_device_add return */
1077 if (legacy_mode) {
1078 struct device *dev = &pdev->dev;
1079 struct ata_host_set *host_set = NULL;
1080
1081 if (legacy_mode & (1 << 0)) {
1082 ata_device_add(probe_ent);
1083 host_set = dev_get_drvdata(dev);
1084 }
1085
1086 if (legacy_mode & (1 << 1)) {
1087 ata_device_add(probe_ent2);
1088 if (host_set) {
1089 host_set->next = dev_get_drvdata(dev);
1090 dev_set_drvdata(dev, host_set);
1091 }
1092 }
1093 } else
1094 ata_device_add(probe_ent);
1095
1096 kfree(probe_ent);
1097 kfree(probe_ent2);
1098
1099 return 0;
1100
1101 err_out_regions:
1102 if (legacy_mode & (1 << 0))
1103 release_region(0x1f0, 8);
1104 if (legacy_mode & (1 << 1))
1105 release_region(0x170, 8);
1106 pci_release_regions(pdev);
1107 err_out:
1108 if (disable_dev_on_err)
1109 pci_disable_device(pdev);
1110 return rc;
1111 }
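/* Illustrative sketch only; the "foo" names, masks, and flags are
 * hypothetical. A typical PCI IDE driver calls ata_pci_init_one()
 * straight from its probe routine:
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		static struct ata_port_info info = {
 *			.sht		= &foo_sht,
 *			.host_flags	= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= 0x1f,		(pio0-4)
 *			.mwdma_mask	= 0x07,		(mwdma0-2)
 *			.udma_mask	= 0x3f,		(udma0-5)
 *			.port_ops	= &foo_ops,
 *		};
 *		struct ata_port_info *ppi[2] = { &info, &info };
 *
 *		return ata_pci_init_one(pdev, ppi, 2);
 *	}
 */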
1112
1113 /**
1114 * ata_pci_clear_simplex - attempt to kick device out of simplex
1115 * @pdev: PCI device
1116 *
1117 * Some PCI ATA devices report simplex mode but in fact can be told to
1118 * enter non-simplex mode. This implements the necessary logic to
1119 * perform the task on such devices. Calling it on other devices will
1120 * have -undefined- behaviour.
1121 */
1122
1123 int ata_pci_clear_simplex(struct pci_dev *pdev)
1124 {
1125 unsigned long bmdma = pci_resource_start(pdev, 4);
1126 u8 simplex;
1127
1128 if (bmdma == 0)
1129 return -ENOENT;
1130
1131 simplex = inb(bmdma + 0x02);
1132 outb(simplex & 0x60, bmdma + 0x02);
1133 simplex = inb(bmdma + 0x02);
1134 if (simplex & 0x80)
1135 return -EOPNOTSUPP;
1136 return 0;
1137 }
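/* Usage note (an assumption based on the description above): a driver for
 * a controller known to falsely advertise simplex operation would call
 * this from its probe path and only fall back to simplex handling when it
 * fails, e.g.
 *
 *	if (ata_pci_clear_simplex(pdev))
 *		dev_printk(KERN_WARNING, &pdev->dev,
 *			   "simplex mode could not be cleared\n");
 */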
1138
1139 unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
1140 {
1141 /* Filter out DMA modes if the device has been configured by
1142 the BIOS as PIO only */
1143
1144 if (ap->ioaddr.bmdma_addr == 0)
1145 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
1146 return xfer_mask;
1147 }
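/* Presumably this is wired up as a driver's mode_filter hook (its
 * signature matches); illustrative only, "foo" is hypothetical:
 *
 *	static const struct ata_port_operations foo_ops = {
 *		...
 *		.mode_filter	= ata_pci_default_filter,
 *		...
 *	};
 */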
1148
1149 #endif /* CONFIG_PCI */
1150