1 /*
2 * libata-bmdma.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/pci.h>
37 #include <linux/libata.h>
38
39 #include "libata.h"
40
41 /**
42 * ata_tf_load_pio - send taskfile registers to host controller
43 * @ap: Port to which output is sent
44 * @tf: ATA taskfile register set
45 *
46 * Outputs ATA taskfile to standard ATA host controller.
47 *
48 * LOCKING:
49 * Inherited from caller.
50 */
51
52 static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
53 {
54 struct ata_ioports *ioaddr = &ap->ioaddr;
55 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
56
57 if (tf->ctl != ap->last_ctl) {
58 outb(tf->ctl, ioaddr->ctl_addr);
59 ap->last_ctl = tf->ctl;
60 ata_wait_idle(ap);
61 }
62
63 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
64 outb(tf->hob_feature, ioaddr->feature_addr);
65 outb(tf->hob_nsect, ioaddr->nsect_addr);
66 outb(tf->hob_lbal, ioaddr->lbal_addr);
67 outb(tf->hob_lbam, ioaddr->lbam_addr);
68 outb(tf->hob_lbah, ioaddr->lbah_addr);
69 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
70 tf->hob_feature,
71 tf->hob_nsect,
72 tf->hob_lbal,
73 tf->hob_lbam,
74 tf->hob_lbah);
75 }
76
77 if (is_addr) {
78 outb(tf->feature, ioaddr->feature_addr);
79 outb(tf->nsect, ioaddr->nsect_addr);
80 outb(tf->lbal, ioaddr->lbal_addr);
81 outb(tf->lbam, ioaddr->lbam_addr);
82 outb(tf->lbah, ioaddr->lbah_addr);
83 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
84 tf->feature,
85 tf->nsect,
86 tf->lbal,
87 tf->lbam,
88 tf->lbah);
89 }
90
91 if (tf->flags & ATA_TFLAG_DEVICE) {
92 outb(tf->device, ioaddr->device_addr);
93 VPRINTK("device 0x%X\n", tf->device);
94 }
95
96 ata_wait_idle(ap);
97 }
98
99 /**
100 * ata_tf_load_mmio - send taskfile registers to host controller
101 * @ap: Port to which output is sent
102 * @tf: ATA taskfile register set
103 *
104 * Outputs ATA taskfile to standard ATA host controller using MMIO.
105 *
106 * LOCKING:
107 * Inherited from caller.
108 */
109
110 static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
111 {
112 struct ata_ioports *ioaddr = &ap->ioaddr;
113 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
114
115 if (tf->ctl != ap->last_ctl) {
116 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
117 ap->last_ctl = tf->ctl;
118 ata_wait_idle(ap);
119 }
120
121 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
122 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
123 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
124 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
125 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
126 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
127 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
128 tf->hob_feature,
129 tf->hob_nsect,
130 tf->hob_lbal,
131 tf->hob_lbam,
132 tf->hob_lbah);
133 }
134
135 if (is_addr) {
136 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
137 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
138 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
139 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
140 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
141 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
142 tf->feature,
143 tf->nsect,
144 tf->lbal,
145 tf->lbam,
146 tf->lbah);
147 }
148
149 if (tf->flags & ATA_TFLAG_DEVICE) {
150 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
151 VPRINTK("device 0x%X\n", tf->device);
152 }
153
154 ata_wait_idle(ap);
155 }
156
157
158 /**
159 * ata_tf_load - send taskfile registers to host controller
160 * @ap: Port to which output is sent
161 * @tf: ATA taskfile register set
162 *
163 * Outputs ATA taskfile to standard ATA host controller using MMIO
164 * or PIO as indicated by the ATA_FLAG_MMIO flag.
165 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
166 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
167 * hob_lbal, hob_lbam, and hob_lbah.
168 *
169 * This function waits for idle (!BUSY and !DRQ) after writing
170 * registers. If the control register has a new value, this
171 * function also waits for idle after writing control and before
172 * writing the remaining registers.
173 *
174 * May be used as the tf_load() entry in ata_port_operations.
175 *
176 * LOCKING:
177 * Inherited from caller.
178 */
179 void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
180 {
181 if (ap->flags & ATA_FLAG_MMIO)
182 ata_tf_load_mmio(ap, tf);
183 else
184 ata_tf_load_pio(ap, tf);
185 }
186
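/*
 * Illustrative sketch, not part of the original file: a low-level driver
 * that relies on the standard taskfile helpers above typically wires them
 * into its ata_port_operations.  "foo_sff_ops" is a hypothetical name; the
 * field names follow the "may be used as the ... entry" notes in the
 * kerneldoc comments of this file.
 *
 *	static const struct ata_port_operations foo_sff_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.exec_command	= ata_exec_command,
 *		.check_status	= ata_check_status,
 *		(remaining hooks omitted)
 *	};
 */
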
187 /**
188 * ata_exec_command_pio - issue ATA command to host controller
189 * @ap: port to which command is being issued
190 * @tf: ATA taskfile register set
191 *
192 * Issues PIO write to ATA command register, with proper
193 * synchronization with interrupt handler / other threads.
194 *
195 * LOCKING:
196 * spin_lock_irqsave(host_set lock)
197 */
198
199 static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
200 {
201 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
202
203 outb(tf->command, ap->ioaddr.command_addr);
204 ata_pause(ap);
205 }
206
207
208 /**
209 * ata_exec_command_mmio - issue ATA command to host controller
210 * @ap: port to which command is being issued
211 * @tf: ATA taskfile register set
212 *
213 * Issues MMIO write to ATA command register, with proper
214 * synchronization with interrupt handler / other threads.
215 *
216 * FIXME: missing write posting for 400nS delay enforcement
217 *
218 * LOCKING:
219 * spin_lock_irqsave(host_set lock)
220 */
221
222 static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
223 {
224 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
225
226 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
227 ata_pause(ap);
228 }
229
230
231 /**
232 * ata_exec_command - issue ATA command to host controller
233 * @ap: port to which command is being issued
234 * @tf: ATA taskfile register set
235 *
236 * Issues PIO/MMIO write to ATA command register, with proper
237 * synchronization with interrupt handler / other threads.
238 *
239 * LOCKING:
240 * spin_lock_irqsave(host_set lock)
241 */
242 void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
243 {
244 if (ap->flags & ATA_FLAG_MMIO)
245 ata_exec_command_mmio(ap, tf);
246 else
247 ata_exec_command_pio(ap, tf);
248 }
249
250 /**
251 * ata_tf_read_pio - input device's ATA taskfile shadow registers
252 * @ap: Port from which input is read
253 * @tf: ATA taskfile register set for storing input
254 *
255 * Reads ATA taskfile registers for currently-selected device
256 * into @tf.
257 *
258 * LOCKING:
259 * Inherited from caller.
260 */
261
262 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
263 {
264 struct ata_ioports *ioaddr = &ap->ioaddr;
265
266 tf->command = ata_check_status(ap);
267 tf->feature = inb(ioaddr->error_addr);
268 tf->nsect = inb(ioaddr->nsect_addr);
269 tf->lbal = inb(ioaddr->lbal_addr);
270 tf->lbam = inb(ioaddr->lbam_addr);
271 tf->lbah = inb(ioaddr->lbah_addr);
272 tf->device = inb(ioaddr->device_addr);
273
274 if (tf->flags & ATA_TFLAG_LBA48) {
275 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
276 tf->hob_feature = inb(ioaddr->error_addr);
277 tf->hob_nsect = inb(ioaddr->nsect_addr);
278 tf->hob_lbal = inb(ioaddr->lbal_addr);
279 tf->hob_lbam = inb(ioaddr->lbam_addr);
280 tf->hob_lbah = inb(ioaddr->lbah_addr);
281 }
282 }
283
284 /**
285 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
286 * @ap: Port from which input is read
287 * @tf: ATA taskfile register set for storing input
288 *
289 * Reads ATA taskfile registers for currently-selected device
290 * into @tf via MMIO.
291 *
292 * LOCKING:
293 * Inherited from caller.
294 */
295
296 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
297 {
298 struct ata_ioports *ioaddr = &ap->ioaddr;
299
300 tf->command = ata_check_status(ap);
301 tf->feature = readb((void __iomem *)ioaddr->error_addr);
302 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
303 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
304 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
305 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
306 tf->device = readb((void __iomem *)ioaddr->device_addr);
307
308 if (tf->flags & ATA_TFLAG_LBA48) {
309 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
310 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
311 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
312 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
313 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
314 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
315 }
316 }
317
318
319 /**
320 * ata_tf_read - input device's ATA taskfile shadow registers
321 * @ap: Port from which input is read
322 * @tf: ATA taskfile register set for storing input
323 *
324 * Reads ATA taskfile registers for currently-selected device
325 * into @tf.
326 *
327 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
328 * is set, also reads the hob registers.
329 *
330 * May be used as the tf_read() entry in ata_port_operations.
331 *
332 * LOCKING:
333 * Inherited from caller.
334 */
335 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
336 {
337 if (ap->flags & ATA_FLAG_MMIO)
338 ata_tf_read_mmio(ap, tf);
339 else
340 ata_tf_read_pio(ap, tf);
341 }
342
343 /**
344 * ata_check_status_pio - Read device status reg & clear interrupt
345 * @ap: port where the device is
346 *
347 * Reads ATA taskfile status register for currently-selected device
348 * and returns its value. This also clears pending interrupts
349 * from this device.
350 *
351 * LOCKING:
352 * Inherited from caller.
353 */
354 static u8 ata_check_status_pio(struct ata_port *ap)
355 {
356 return inb(ap->ioaddr.status_addr);
357 }
358
359 /**
360 * ata_check_status_mmio - Read device status reg & clear interrupt
361 * @ap: port where the device is
362 *
363 * Reads ATA taskfile status register for currently-selected device
364 * via MMIO and returns its value. This also clears pending interrupts
365 * from this device.
366 *
367 * LOCKING:
368 * Inherited from caller.
369 */
370 static u8 ata_check_status_mmio(struct ata_port *ap)
371 {
372 return readb((void __iomem *) ap->ioaddr.status_addr);
373 }
374
375
376 /**
377 * ata_check_status - Read device status reg & clear interrupt
378 * @ap: port where the device is
379 *
380 * Reads ATA taskfile status register for currently-selected device
381 * and returns its value. This also clears pending interrupts
382 * from this device.
383 *
384 * May be used as the check_status() entry in ata_port_operations.
385 *
386 * LOCKING:
387 * Inherited from caller.
388 */
389 u8 ata_check_status(struct ata_port *ap)
390 {
391 if (ap->flags & ATA_FLAG_MMIO)
392 return ata_check_status_mmio(ap);
393 return ata_check_status_pio(ap);
394 }
395
396
397 /**
398 * ata_altstatus - Read device alternate status reg
399 * @ap: port where the device is
400 *
401 * Reads ATA taskfile alternate status register for
402 * currently-selected device and returns its value.
403 *
404 * Note: may NOT be used as the check_altstatus() entry in
405 * ata_port_operations.
406 *
407 * LOCKING:
408 * Inherited from caller.
409 */
410 u8 ata_altstatus(struct ata_port *ap)
411 {
412 if (ap->ops->check_altstatus)
413 return ap->ops->check_altstatus(ap);
414
415 if (ap->flags & ATA_FLAG_MMIO)
416 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
417 return inb(ap->ioaddr.altstatus_addr);
418 }
419
420 /**
421 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
422 * @qc: Info associated with this ATA transaction.
423 *
424 * LOCKING:
425 * spin_lock_irqsave(host_set lock)
426 */
427
428 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
429 {
430 struct ata_port *ap = qc->ap;
431 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
432 u8 dmactl;
433 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
434
435 /* load PRD table addr. */
436 mb(); /* make sure PRD table writes are visible to controller */
437 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
438
439 /* specify data direction, triple-check start bit is clear */
440 dmactl = readb(mmio + ATA_DMA_CMD);
441 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
442 if (!rw)
443 dmactl |= ATA_DMA_WR;
444 writeb(dmactl, mmio + ATA_DMA_CMD);
445
446 /* issue r/w command */
447 ap->ops->exec_command(ap, &qc->tf);
448 }
449
450 /**
451 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
452 * @qc: Info associated with this ATA transaction.
453 *
454 * LOCKING:
455 * spin_lock_irqsave(host_set lock)
456 */
457
458 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
459 {
460 struct ata_port *ap = qc->ap;
461 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
462 u8 dmactl;
463
464 /* start host DMA transaction */
465 dmactl = readb(mmio + ATA_DMA_CMD);
466 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
467
468 /* Strictly, one may wish to issue a readb() here, to
469 * flush the mmio write. However, control also passes
470 * to the hardware at this point, and it will interrupt
471 * us when we are to resume control. So, in effect,
472 * we don't care when the mmio write flushes.
473 * Further, a read of the DMA status register _immediately_
474 * following the write may not be what certain flaky hardware
475 * is expecting, so I think it is best to not add a readb()
476 * without first checking all the MMIO ATA cards/mobos.
477 * Or maybe I'm just being paranoid.
478 */
479 }
480
481 /**
482 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
483 * @qc: Info associated with this ATA transaction.
484 *
485 * LOCKING:
486 * spin_lock_irqsave(host_set lock)
487 */
488
489 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
490 {
491 struct ata_port *ap = qc->ap;
492 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
493 u8 dmactl;
494
495 /* load PRD table addr. */
496 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
497
498 /* specify data direction, triple-check start bit is clear */
499 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
500 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
501 if (!rw)
502 dmactl |= ATA_DMA_WR;
503 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
504
505 /* issue r/w command */
506 ap->ops->exec_command(ap, &qc->tf);
507 }
508
509 /**
510 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
511 * @qc: Info associated with this ATA transaction.
512 *
513 * LOCKING:
514 * spin_lock_irqsave(host_set lock)
515 */
516
517 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
518 {
519 struct ata_port *ap = qc->ap;
520 u8 dmactl;
521
522 /* start host DMA transaction */
523 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
524 outb(dmactl | ATA_DMA_START,
525 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
526 }
527
528
529 /**
530 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
531 * @qc: Info associated with this ATA transaction.
532 *
533 * Writes the ATA_DMA_START flag to the DMA command register.
534 *
535 * May be used as the bmdma_start() entry in ata_port_operations.
536 *
537 * LOCKING:
538 * spin_lock_irqsave(host_set lock)
539 */
540 void ata_bmdma_start(struct ata_queued_cmd *qc)
541 {
542 if (qc->ap->flags & ATA_FLAG_MMIO)
543 ata_bmdma_start_mmio(qc);
544 else
545 ata_bmdma_start_pio(qc);
546 }
547
548
549 /**
550 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
551 * @qc: Info associated with this ATA transaction.
552 *
553 * Writes address of PRD table to device's PRD Table Address
554 * register, sets the DMA control register, and calls
555 * ops->exec_command() to start the transfer.
556 *
557 * May be used as the bmdma_setup() entry in ata_port_operations.
558 *
559 * LOCKING:
560 * spin_lock_irqsave(host_set lock)
561 */
562 void ata_bmdma_setup(struct ata_queued_cmd *qc)
563 {
564 if (qc->ap->flags & ATA_FLAG_MMIO)
565 ata_bmdma_setup_mmio(qc);
566 else
567 ata_bmdma_setup_pio(qc);
568 }
569
570
571 /**
572 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
573 * @ap: Port associated with this ATA transaction.
574 *
575 * Clear interrupt and error flags in DMA status register.
576 *
577 * May be used as the irq_clear() entry in ata_port_operations.
578 *
579 * LOCKING:
580 * spin_lock_irqsave(host_set lock)
581 */
582
583 void ata_bmdma_irq_clear(struct ata_port *ap)
584 {
585 if (!ap->ioaddr.bmdma_addr)
586 return;
587
588 if (ap->flags & ATA_FLAG_MMIO) {
589 void __iomem *mmio =
590 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
591 writeb(readb(mmio), mmio);
592 } else {
593 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
594 outb(inb(addr), addr);
595 }
596 }
597
598
599 /**
600 * ata_bmdma_status - Read PCI IDE BMDMA status
601 * @ap: Port associated with this ATA transaction.
602 *
603 * Read and return BMDMA status register.
604 *
605 * May be used as the bmdma_status() entry in ata_port_operations.
606 *
607 * LOCKING:
608 * spin_lock_irqsave(host_set lock)
609 */
610
611 u8 ata_bmdma_status(struct ata_port *ap)
612 {
613 u8 host_stat;
614 if (ap->flags & ATA_FLAG_MMIO) {
615 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
616 host_stat = readb(mmio + ATA_DMA_STATUS);
617 } else
618 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
619 return host_stat;
620 }
621
622
623 /**
624 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
625 * @qc: Command we are ending DMA for
626 *
627 * Clears the ATA_DMA_START flag in the dma control register
628 *
629 * May be used as the bmdma_stop() entry in ata_port_operations.
630 *
631 * LOCKING:
632 * spin_lock_irqsave(host_set lock)
633 */
634
635 void ata_bmdma_stop(struct ata_queued_cmd *qc)
636 {
637 struct ata_port *ap = qc->ap;
638 if (ap->flags & ATA_FLAG_MMIO) {
639 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
640
641 /* clear start/stop bit */
642 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
643 mmio + ATA_DMA_CMD);
644 } else {
645 /* clear start/stop bit */
646 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
647 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
648 }
649
650 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
651 ata_altstatus(ap); /* dummy read */
652 }
653
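/*
 * Illustrative sketch, not part of the original file: the BMDMA helpers
 * above normally travel together as the DMA half of a driver's
 * ata_port_operations, e.g.:
 *
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.irq_clear	= ata_bmdma_irq_clear,
 */
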
654 /**
655 * ata_bmdma_freeze - Freeze BMDMA controller port
656 * @ap: port to freeze
657 *
658 * Freeze BMDMA controller port.
659 *
660 * LOCKING:
661 * Inherited from caller.
662 */
663 void ata_bmdma_freeze(struct ata_port *ap)
664 {
665 struct ata_ioports *ioaddr = &ap->ioaddr;
666
667 ap->ctl |= ATA_NIEN;
668 ap->last_ctl = ap->ctl;
669
670 if (ap->flags & ATA_FLAG_MMIO)
671 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
672 else
673 outb(ap->ctl, ioaddr->ctl_addr);
674 }
675
676 /**
677 * ata_bmdma_thaw - Thaw BMDMA controller port
678 * @ap: port to thaw
679 *
680 * Thaw BMDMA controller port.
681 *
682 * LOCKING:
683 * Inherited from caller.
684 */
685 void ata_bmdma_thaw(struct ata_port *ap)
686 {
687 /* clear & re-enable interrupts */
688 ata_chk_status(ap);
689 ap->ops->irq_clear(ap);
690 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
691 ata_irq_on(ap);
692 }
693
694 /**
695 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
696 * @ap: port to handle error for
697 * @prereset: prereset method (can be NULL)
698 * @softreset: softreset method (can be NULL)
699 * @hardreset: hardreset method (can be NULL)
700 * @postreset: postreset method (can be NULL)
701 *
702 * Handle error for ATA BMDMA controller. It can handle both
703 * PATA and SATA controllers. Many controllers should be able to
704 * use this EH as-is or with some added handling before and
705 * after.
706 *
707 * This function is intended to be used for constructing
708 * the ->error_handler callback of low-level drivers.
709 *
710 * LOCKING:
711 * Kernel thread context (may sleep)
712 */
713 void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
714 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
715 ata_postreset_fn_t postreset)
716 {
717 struct ata_eh_context *ehc = &ap->eh_context;
718 struct ata_queued_cmd *qc;
719 unsigned long flags;
720 int thaw = 0;
721
722 qc = __ata_qc_from_tag(ap, ap->active_tag);
723 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
724 qc = NULL;
725
726 /* reset PIO HSM and stop DMA engine */
727 spin_lock_irqsave(ap->lock, flags);
728
729 ap->hsm_task_state = HSM_ST_IDLE;
730
731 if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
732 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
733 u8 host_stat;
734
735 host_stat = ata_bmdma_status(ap);
736
737 ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
738
739 /* BMDMA controllers indicate host bus error by
740 * setting DMA_ERR bit and timing out. As it wasn't
741 * really a timeout event, adjust error mask and
742 * cancel frozen state.
743 */
744 if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
745 qc->err_mask = AC_ERR_HOST_BUS;
746 thaw = 1;
747 }
748
749 ap->ops->bmdma_stop(qc);
750 }
751
752 ata_altstatus(ap);
753 ata_chk_status(ap);
754 ap->ops->irq_clear(ap);
755
756 spin_unlock_irqrestore(ap->lock, flags);
757
758 if (thaw)
759 ata_eh_thaw_port(ap);
760
761 /* PIO and DMA engines have been stopped, perform recovery */
762 ata_do_eh(ap, prereset, softreset, hardreset, postreset);
763 }
764
765 /**
766 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
767 * @ap: port to handle error for
768 *
769 * Stock error handler for BMDMA controller.
770 *
771 * LOCKING:
772 * Kernel thread context (may sleep)
773 */
774 void ata_bmdma_error_handler(struct ata_port *ap)
775 {
776 ata_reset_fn_t hardreset;
777
778 hardreset = NULL;
779 if (sata_scr_valid(ap))
780 hardreset = sata_std_hardreset;
781
782 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
783 ata_std_postreset);
784 }
785
786 /**
787 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
788 * BMDMA controller
789 * @qc: internal command to clean up
790 *
791 * LOCKING:
792 * Kernel thread context (may sleep)
793 */
794 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
795 {
796 ata_bmdma_stop(qc);
797 }
798
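/*
 * Illustrative sketch, not part of the original file: taken together, the
 * freeze/thaw and error-handling helpers above form the stock EH block
 * that SFF/BMDMA drivers place in their ata_port_operations:
 *
 *		.freeze			= ata_bmdma_freeze,
 *		.thaw			= ata_bmdma_thaw,
 *		.error_handler		= ata_bmdma_error_handler,
 *		.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 */
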
799 #ifdef CONFIG_PCI
800 static struct ata_probe_ent *
801 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
802 {
803 struct ata_probe_ent *probe_ent;
804
805 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
806 if (!probe_ent) {
807 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
808 kobject_name(&(dev->kobj)));
809 return NULL;
810 }
811
812 INIT_LIST_HEAD(&probe_ent->node);
813 probe_ent->dev = dev;
814
815 probe_ent->sht = port->sht;
816 probe_ent->host_flags = port->host_flags;
817 probe_ent->pio_mask = port->pio_mask;
818 probe_ent->mwdma_mask = port->mwdma_mask;
819 probe_ent->udma_mask = port->udma_mask;
820 probe_ent->port_ops = port->port_ops;
821
822 return probe_ent;
823 }
824
825
826 /**
827 * ata_pci_init_native_mode - Initialize native-mode driver
828 * @pdev: pci device to be initialized
829 * @port: array[2] of pointers to port info structures.
830 * @ports: bitmap of ports present
831 *
832 * Utility function which allocates and initializes an
833 * ata_probe_ent structure for a standard dual-port
834 * PIO-based IDE controller. The returned ata_probe_ent
835 * structure can be passed to ata_device_add(), and should
836 * then be freed with kfree().
837 *
838 * The caller need only pass the address of the primary port; the
839 * secondary will be deduced automatically. If the device has
840 * non-standard secondary port mappings, this function can be called
841 * twice, once for each interface.
842 */
843
844 struct ata_probe_ent *
845 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
846 {
847 struct ata_probe_ent *probe_ent =
848 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
849 int p = 0;
850 unsigned long bmdma;
851
852 if (!probe_ent)
853 return NULL;
854
855 probe_ent->irq = pdev->irq;
856 probe_ent->irq_flags = IRQF_SHARED;
857 probe_ent->private_data = port[0]->private_data;
858
859 if (ports & ATA_PORT_PRIMARY) {
860 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
861 probe_ent->port[p].altstatus_addr =
862 probe_ent->port[p].ctl_addr =
863 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
864 bmdma = pci_resource_start(pdev, 4);
865 if (bmdma) {
866 if (inb(bmdma + 2) & 0x80)
867 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
868 probe_ent->port[p].bmdma_addr = bmdma;
869 }
870 ata_std_ports(&probe_ent->port[p]);
871 p++;
872 }
873
874 if (ports & ATA_PORT_SECONDARY) {
875 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
876 probe_ent->port[p].altstatus_addr =
877 probe_ent->port[p].ctl_addr =
878 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
879 bmdma = pci_resource_start(pdev, 4);
880 if (bmdma) {
881 bmdma += 8;
882 if (inb(bmdma + 2) & 0x80)
883 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
884 probe_ent->port[p].bmdma_addr = bmdma;
885 }
886 ata_std_ports(&probe_ent->port[p]);
887 p++;
888 }
889
890 probe_ent->n_ports = p;
891 return probe_ent;
892 }
893
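/*
 * Illustrative sketch (assumed driver code, not from this file): a driver
 * with its own registration path uses the helper above roughly as the
 * kerneldoc describes -- allocate, hand to ata_device_add(), then free.
 * Here "ppi" stands for the driver's struct ata_port_info *[2] array.
 *
 *	probe_ent = ata_pci_init_native_mode(pdev, ppi,
 *				ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 *	if (!probe_ent)
 *		return -ENOMEM;
 *
 *	pci_set_master(pdev);
 *	ata_device_add(probe_ent);
 *	kfree(probe_ent);
 */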
894
895 static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
896 struct ata_port_info *port, int port_num)
897 {
898 struct ata_probe_ent *probe_ent;
899 unsigned long bmdma;
900
901 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
902 if (!probe_ent)
903 return NULL;
904
905 probe_ent->legacy_mode = 1;
906 probe_ent->n_ports = 1;
907 probe_ent->hard_port_no = port_num;
908 probe_ent->private_data = port->private_data;
909
910 switch(port_num)
911 {
912 case 0:
913 probe_ent->irq = 14;
914 probe_ent->port[0].cmd_addr = 0x1f0;
915 probe_ent->port[0].altstatus_addr =
916 probe_ent->port[0].ctl_addr = 0x3f6;
917 break;
918 case 1:
919 probe_ent->irq = 15;
920 probe_ent->port[0].cmd_addr = 0x170;
921 probe_ent->port[0].altstatus_addr =
922 probe_ent->port[0].ctl_addr = 0x376;
923 break;
924 }
925
926 bmdma = pci_resource_start(pdev, 4);
927 if (bmdma != 0) {
928 bmdma += 8 * port_num;
929 probe_ent->port[0].bmdma_addr = bmdma;
930 if (inb(bmdma + 2) & 0x80)
931 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
932 }
933 ata_std_ports(&probe_ent->port[0]);
934
935 return probe_ent;
936 }
937
938
939 /**
940 * ata_pci_init_one - Initialize/register PCI IDE host controller
941 * @pdev: Controller to be initialized
942 * @port_info: Information from low-level host driver
943 * @n_ports: Number of ports attached to host controller
944 *
945 * This is a helper function which can be called from a driver's
946 * xxx_init_one() probe function if the hardware uses traditional
947 * IDE taskfile registers.
948 *
949 * This function calls pci_enable_device(), reserves its register
950 * regions, sets the dma mask, enables bus master mode, and calls
951 * ata_device_add().
952 *
953 * LOCKING:
954 * Inherited from PCI layer (may sleep).
955 *
956 * RETURNS:
957 * Zero on success, negative errno-based value on error.
958 */
959
960 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
961 unsigned int n_ports)
962 {
963 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
964 struct ata_port_info *port[2];
965 u8 tmp8, mask;
966 unsigned int legacy_mode = 0;
967 int disable_dev_on_err = 1;
968 int rc;
969
970 DPRINTK("ENTER\n");
971
972 port[0] = port_info[0];
973 if (n_ports > 1)
974 port[1] = port_info[1];
975 else
976 port[1] = port[0];
977
978 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
979 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
980 /* TODO: What if one channel is in native mode ... */
981 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
982 mask = (1 << 2) | (1 << 0);
983 if ((tmp8 & mask) != mask)
984 legacy_mode = (1 << 3);
985 }
986
987 /* FIXME... */
988 if ((!legacy_mode) && (n_ports > 2)) {
989 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
990 n_ports = 2;
991 /* For now */
992 }
993
994 /* FIXME: Really for ATA it isn't safe because the device may be
995 multi-purpose and we want to leave it alone if it was already
996 enabled. Secondly for shared use as Arjan says we want refcounting
997
998 Checking dev->is_enabled is insufficient as this is not set at
999 boot for the primary video which is BIOS enabled
1000 */
1001
1002 rc = pci_enable_device(pdev);
1003 if (rc)
1004 return rc;
1005
1006 rc = pci_request_regions(pdev, DRV_NAME);
1007 if (rc) {
1008 disable_dev_on_err = 0;
1009 goto err_out;
1010 }
1011
1012 /* FIXME: Should use platform specific mappers for legacy port ranges */
1013 if (legacy_mode) {
1014 if (!request_region(0x1f0, 8, "libata")) {
1015 struct resource *conflict, res;
1016 res.start = 0x1f0;
1017 res.end = 0x1f0 + 8 - 1;
1018 conflict = ____request_resource(&ioport_resource, &res);
1019 if (!strcmp(conflict->name, "libata"))
1020 legacy_mode |= (1 << 0);
1021 else {
1022 disable_dev_on_err = 0;
1023 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
1024 }
1025 } else
1026 legacy_mode |= (1 << 0);
1027
1028 if (!request_region(0x170, 8, "libata")) {
1029 struct resource *conflict, res;
1030 res.start = 0x170;
1031 res.end = 0x170 + 8 - 1;
1032 conflict = ____request_resource(&ioport_resource, &res);
1033 if (!strcmp(conflict->name, "libata"))
1034 legacy_mode |= (1 << 1);
1035 else {
1036 disable_dev_on_err = 0;
1037 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
1038 }
1039 } else
1040 legacy_mode |= (1 << 1);
1041 }
1042
1043 /* we have legacy mode, but all ports are unavailable */
1044 if (legacy_mode == (1 << 3)) {
1045 rc = -EBUSY;
1046 goto err_out_regions;
1047 }
1048
1049 /* FIXME: If we get no DMA mask we should fall back to PIO */
1050 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1051 if (rc)
1052 goto err_out_regions;
1053 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1054 if (rc)
1055 goto err_out_regions;
1056
1057 if (legacy_mode) {
1058 if (legacy_mode & (1 << 0))
1059 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
1060 if (legacy_mode & (1 << 1))
1061 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
1062 } else {
1063 if (n_ports == 2)
1064 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1065 else
1066 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
1067 }
1068 if (!probe_ent && !probe_ent2) {
1069 rc = -ENOMEM;
1070 goto err_out_regions;
1071 }
1072
1073 pci_set_master(pdev);
1074
1075 /* FIXME: check ata_device_add return */
1076 if (legacy_mode) {
1077 struct device *dev = &pdev->dev;
1078 struct ata_host_set *host_set = NULL;
1079
1080 if (legacy_mode & (1 << 0)) {
1081 ata_device_add(probe_ent);
1082 host_set = dev_get_drvdata(dev);
1083 }
1084
1085 if (legacy_mode & (1 << 1)) {
1086 ata_device_add(probe_ent2);
1087 if (host_set) {
1088 host_set->next = dev_get_drvdata(dev);
1089 dev_set_drvdata(dev, host_set);
1090 }
1091 }
1092 } else
1093 ata_device_add(probe_ent);
1094
1095 kfree(probe_ent);
1096 kfree(probe_ent2);
1097
1098 return 0;
1099
1100 err_out_regions:
1101 if (legacy_mode & (1 << 0))
1102 release_region(0x1f0, 8);
1103 if (legacy_mode & (1 << 1))
1104 release_region(0x170, 8);
1105 pci_release_regions(pdev);
1106 err_out:
1107 if (disable_dev_on_err)
1108 pci_disable_device(pdev);
1109 return rc;
1110 }
1111
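/*
 * Illustrative sketch (assumed driver code, not from this file): a typical
 * xxx_init_one() probe routine only has to describe its ports and hand
 * control to ata_pci_init_one().  All names prefixed "foo_" are
 * hypothetical; the ata_port_info fields are the ones copied by
 * ata_probe_ent_alloc() above.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		static struct ata_port_info info = {
 *			.sht		= &foo_sht,
 *			.host_flags	= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= 0x1f,
 *			.mwdma_mask	= 0x07,
 *			.udma_mask	= 0x3f,
 *			.port_ops	= &foo_ops,
 *		};
 *		struct ata_port_info *port_info[2] = { &info, &info };
 *
 *		return ata_pci_init_one(pdev, port_info, 2);
 *	}
 */
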
1112 /**
1113 * ata_pci_clear_simplex - attempt to kick device out of simplex
1114 * @pdev: PCI device
1115 *
1116 * Some PCI ATA devices report simplex mode but in fact can be told to
1117 * enter non-simplex mode. This implements the necessary logic to
1118 * perform the task on such devices. Calling it on other devices will
1119 * have -undefined- behaviour.
1120 */
1121
1122 int ata_pci_clear_simplex(struct pci_dev *pdev)
1123 {
1124 unsigned long bmdma = pci_resource_start(pdev, 4);
1125 u8 simplex;
1126
1127 if (bmdma == 0)
1128 return -ENOENT;
1129
1130 simplex = inb(bmdma + 0x02);
1131 outb(simplex & 0x60, bmdma + 0x02);
1132 simplex = inb(bmdma + 0x02);
1133 if (simplex & 0x80)
1134 return -EOPNOTSUPP;
1135 return 0;
1136 }
1137
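/*
 * Illustrative use (assumed driver code, not from this file): a driver
 * that knows its controller merely *reports* simplex can try to clear the
 * bit before registering, and fall back to simplex operation if that
 * fails:
 *
 *	if (ata_pci_clear_simplex(pdev))
 *		dev_printk(KERN_WARNING, &pdev->dev,
 *			   "simplex mode could not be cleared\n");
 */
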
1138 unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
1139 {
1140 /* Filter out DMA modes if the device has been configured by
1141 the BIOS as PIO only */
1142
1143 if (ap->ioaddr.bmdma_addr == 0)
1144 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
1145 return xfer_mask;
1146 }
1147
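/*
 * Illustrative sketch, not part of the original file: drivers normally
 * plug this helper into the ->mode_filter hook of their
 * ata_port_operations, or call it from their own filter before applying
 * chip-specific restrictions:
 *
 *		.mode_filter	= ata_pci_default_filter,
 */
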
1148 #endif /* CONFIG_PCI */
1149