1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
68 static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
69 static int fgb(u32 bitmap);
70 static int ata_choose_xfer_mode(const struct ata_port *ap,
71 u8 *xfer_mode_out,
72 unsigned int *xfer_shift_out);
73
74 static unsigned int ata_unique_id = 1;
75 static struct workqueue_struct *ata_wq;
76
77 int atapi_enabled = 0;
78 module_param(atapi_enabled, int, 0444);
79 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
80
81 int libata_fua = 0;
82 module_param_named(fua, libata_fua, int, 0444);
83 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
84
85 MODULE_AUTHOR("Jeff Garzik");
86 MODULE_DESCRIPTION("Library module for ATA devices");
87 MODULE_LICENSE("GPL");
88 MODULE_VERSION(DRV_VERSION);
89
90
91 /**
92 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
93 * @tf: Taskfile to convert
94 * @fis: Buffer into which data will be output
95 * @pmp: Port multiplier port
96 *
97 * Converts a standard ATA taskfile to a Serial ATA
98 * FIS structure (Register - Host to Device).
99 *
100 * LOCKING:
101 * Inherited from caller.
102 */
103
104 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
105 {
106 fis[0] = 0x27; /* Register - Host to Device FIS */
107 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
108 bit 7 indicates Command FIS */
109 fis[2] = tf->command;
110 fis[3] = tf->feature;
111
112 fis[4] = tf->lbal;
113 fis[5] = tf->lbam;
114 fis[6] = tf->lbah;
115 fis[7] = tf->device;
116
117 fis[8] = tf->hob_lbal;
118 fis[9] = tf->hob_lbam;
119 fis[10] = tf->hob_lbah;
120 fis[11] = tf->hob_feature;
121
122 fis[12] = tf->nsect;
123 fis[13] = tf->hob_nsect;
124 fis[14] = 0;
125 fis[15] = tf->ctl;
126
127 fis[16] = 0;
128 fis[17] = 0;
129 fis[18] = 0;
130 fis[19] = 0;
131 }
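/*
 * Worked example (illustrative values, not driver code): a READ DMA EXT
 * taskfile for one sector at LBA 0x12345678 with pmp == 0 serializes as
 *
 *	fis[0]     = 0x27		Register - Host to Device
 *	fis[1]     = 0x80		pmp 0, bit 7 set: Command FIS
 *	fis[2]     = 0x25		tf->command (ATA_CMD_READ_EXT)
 *	fis[4..6]  = 0x78 0x56 0x34	lbal / lbam / lbah
 *	fis[7]     = 0x40		tf->device (LBA bit)
 *	fis[8..10] = 0x12 0x00 0x00	hob_lbal / hob_lbam / hob_lbah
 *	fis[12]    = 0x01		nsect
 *
 * with the remaining bytes zero (fis[15] carries tf->ctl).
 */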
132
133 /**
134 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
135 * @fis: Buffer from which data will be input
136 * @tf: Taskfile to output
137 *
138 * Converts a serial ATA FIS structure to a standard ATA taskfile.
139 *
140 * LOCKING:
141 * Inherited from caller.
142 */
143
144 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
145 {
146 tf->command = fis[2]; /* status */
147 tf->feature = fis[3]; /* error */
148
149 tf->lbal = fis[4];
150 tf->lbam = fis[5];
151 tf->lbah = fis[6];
152 tf->device = fis[7];
153
154 tf->hob_lbal = fis[8];
155 tf->hob_lbam = fis[9];
156 tf->hob_lbah = fis[10];
157
158 tf->nsect = fis[12];
159 tf->hob_nsect = fis[13];
160 }
161
162 static const u8 ata_rw_cmds[] = {
163 /* pio multi */
164 ATA_CMD_READ_MULTI,
165 ATA_CMD_WRITE_MULTI,
166 ATA_CMD_READ_MULTI_EXT,
167 ATA_CMD_WRITE_MULTI_EXT,
168 0,
169 0,
170 0,
171 ATA_CMD_WRITE_MULTI_FUA_EXT,
172 /* pio */
173 ATA_CMD_PIO_READ,
174 ATA_CMD_PIO_WRITE,
175 ATA_CMD_PIO_READ_EXT,
176 ATA_CMD_PIO_WRITE_EXT,
177 0,
178 0,
179 0,
180 0,
181 /* dma */
182 ATA_CMD_READ,
183 ATA_CMD_WRITE,
184 ATA_CMD_READ_EXT,
185 ATA_CMD_WRITE_EXT,
186 0,
187 0,
188 0,
189 ATA_CMD_WRITE_FUA_EXT
190 };
191
192 /**
193 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
194 * @qc: command to examine and configure
195 *
196 * Examine the device configuration and tf->flags to calculate
197 * the proper read/write commands and protocol to use.
198 *
199 * LOCKING:
200 * caller.
201 */
202 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
203 {
204 struct ata_taskfile *tf = &qc->tf;
205 struct ata_device *dev = qc->dev;
206 u8 cmd;
207
208 int index, fua, lba48, write;
209
210 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
211 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
212 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
213
214 if (dev->flags & ATA_DFLAG_PIO) {
215 tf->protocol = ATA_PROT_PIO;
216 index = dev->multi_count ? 0 : 8;
217 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
218 /* Unable to use DMA due to host limitation */
219 tf->protocol = ATA_PROT_PIO;
220 index = dev->multi_count ? 0 : 8;
221 } else {
222 tf->protocol = ATA_PROT_DMA;
223 index = 16;
224 }
225
226 cmd = ata_rw_cmds[index + fua + lba48 + write];
227 if (cmd) {
228 tf->command = cmd;
229 return 0;
230 }
231 return -1;
232 }
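/*
 * A quick sketch of the table lookup above (illustrative): an LBA48 FUA
 * DMA write gives index 16, fua 4, lba48 2, write 1, so ata_rw_cmds[23]
 * == ATA_CMD_WRITE_FUA_EXT is chosen; a PIO read on a device without
 * multi_count uses index 8 and ends up with ATA_CMD_PIO_READ.
 */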
233
234 static const char * const xfer_mode_str[] = {
235 "UDMA/16",
236 "UDMA/25",
237 "UDMA/33",
238 "UDMA/44",
239 "UDMA/66",
240 "UDMA/100",
241 "UDMA/133",
242 "UDMA7",
243 "MWDMA0",
244 "MWDMA1",
245 "MWDMA2",
246 "PIO0",
247 "PIO1",
248 "PIO2",
249 "PIO3",
250 "PIO4",
251 };
252
253 /**
254 * ata_mode_string - convert transfer mode bitmask to string
255 * @mask: mask of transfer modes supported; only the highest bit counts.
256 *
257 * Determine the string which represents the highest speed
258 * (highest bit in @mask).
259 *
260 * LOCKING:
261 * None.
262 *
263 * RETURNS:
264 * Constant C string representing highest speed listed in
265 * @mask, or the constant C string "<n/a>".
266 */
267
268 static const char *ata_mode_string(unsigned int mask)
269 {
270 int i;
271
272 for (i = 7; i >= 0; i--)
273 if (mask & (1 << i))
274 goto out;
275 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
276 if (mask & (1 << i))
277 goto out;
278 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
279 if (mask & (1 << i))
280 goto out;
281
282 return "<n/a>";
283
284 out:
285 return xfer_mode_str[i];
286 }
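/*
 * For example (illustrative, assuming the shift constants line up with
 * the table above - UDMA at bit 0, MWDMA at ATA_SHIFT_MWDMA, PIO at
 * ATA_SHIFT_PIO): a mask whose highest set bit is bit 5 returns
 * "UDMA/100", while a PIO-only mask with bit (ATA_SHIFT_PIO + 4) set
 * returns "PIO4".
 */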
287
288 /**
289 * ata_pio_devchk - PATA device presence detection
290 * @ap: ATA channel to examine
291 * @device: Device to examine (starting at zero)
292 *
293 * This technique was originally described in
294 * Hale Landis's ATADRVR (www.ata-atapi.com), and
295 * later found its way into the ATA/ATAPI spec.
296 *
297 * Write a pattern to the ATA shadow registers,
298 * and if a device is present, it will respond by
299 * correctly storing and echoing back the
300 * ATA shadow register contents.
301 *
302 * LOCKING:
303 * caller.
304 */
305
306 static unsigned int ata_pio_devchk(struct ata_port *ap,
307 unsigned int device)
308 {
309 struct ata_ioports *ioaddr = &ap->ioaddr;
310 u8 nsect, lbal;
311
312 ap->ops->dev_select(ap, device);
313
314 outb(0x55, ioaddr->nsect_addr);
315 outb(0xaa, ioaddr->lbal_addr);
316
317 outb(0xaa, ioaddr->nsect_addr);
318 outb(0x55, ioaddr->lbal_addr);
319
320 outb(0x55, ioaddr->nsect_addr);
321 outb(0xaa, ioaddr->lbal_addr);
322
323 nsect = inb(ioaddr->nsect_addr);
324 lbal = inb(ioaddr->lbal_addr);
325
326 if ((nsect == 0x55) && (lbal == 0xaa))
327 return 1; /* we found a device */
328
329 return 0; /* nothing found */
330 }
331
332 /**
333 * ata_mmio_devchk - PATA device presence detection
334 * @ap: ATA channel to examine
335 * @device: Device to examine (starting at zero)
336 *
337 * This technique was originally described in
338 * Hale Landis's ATADRVR (www.ata-atapi.com), and
339 * later found its way into the ATA/ATAPI spec.
340 *
341 * Write a pattern to the ATA shadow registers,
342 * and if a device is present, it will respond by
343 * correctly storing and echoing back the
344 * ATA shadow register contents.
345 *
346 * LOCKING:
347 * caller.
348 */
349
350 static unsigned int ata_mmio_devchk(struct ata_port *ap,
351 unsigned int device)
352 {
353 struct ata_ioports *ioaddr = &ap->ioaddr;
354 u8 nsect, lbal;
355
356 ap->ops->dev_select(ap, device);
357
358 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
359 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
360
361 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
362 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
363
364 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
365 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
366
367 nsect = readb((void __iomem *) ioaddr->nsect_addr);
368 lbal = readb((void __iomem *) ioaddr->lbal_addr);
369
370 if ((nsect == 0x55) && (lbal == 0xaa))
371 return 1; /* we found a device */
372
373 return 0; /* nothing found */
374 }
375
376 /**
377 * ata_devchk - PATA device presence detection
378 * @ap: ATA channel to examine
379 * @device: Device to examine (starting at zero)
380 *
381 * Dispatch ATA device presence detection, depending
382 * on whether we are using PIO or MMIO to talk to the
383 * ATA shadow registers.
384 *
385 * LOCKING:
386 * caller.
387 */
388
389 static unsigned int ata_devchk(struct ata_port *ap,
390 unsigned int device)
391 {
392 if (ap->flags & ATA_FLAG_MMIO)
393 return ata_mmio_devchk(ap, device);
394 return ata_pio_devchk(ap, device);
395 }
396
397 /**
398 * ata_dev_classify - determine device type based on ATA-spec signature
399 * @tf: ATA taskfile register set for device to be identified
400 *
401 * Determine from taskfile register contents whether a device is
402 * ATA or ATAPI, as per "Signature and persistence" section
403 * of ATA/PI spec (volume 1, sect 5.14).
404 *
405 * LOCKING:
406 * None.
407 *
408 * RETURNS:
409 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
410 * in the event of failure.
411 */
412
413 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
414 {
415 /* Apple's open source Darwin code hints that some devices only
416 * put a proper signature into the LBA mid/high registers,
417 * so we only check those. It's sufficient for uniqueness.
418 */
419
420 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
421 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
422 DPRINTK("found ATA device by sig\n");
423 return ATA_DEV_ATA;
424 }
425
426 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
427 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
428 DPRINTK("found ATAPI device by sig\n");
429 return ATA_DEV_ATAPI;
430 }
431
432 DPRINTK("unknown device\n");
433 return ATA_DEV_UNKNOWN;
434 }
435
436 /**
437 * ata_dev_try_classify - Parse returned ATA device signature
438 * @ap: ATA channel to examine
439 * @device: Device to examine (starting at zero)
440 * @r_err: Value of error register on completion
441 *
442 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
443 * an ATA/ATAPI-defined set of values is placed in the ATA
444 * shadow registers, indicating the results of device detection
445 * and diagnostics.
446 *
447 * Select the ATA device, and read the values from the ATA shadow
448 * registers. Then parse according to the Error register value,
449 * and the spec-defined values examined by ata_dev_classify().
450 *
451 * LOCKING:
452 * caller.
453 *
454 * RETURNS:
455 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
456 */
457
458 static unsigned int
459 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
460 {
461 struct ata_taskfile tf;
462 unsigned int class;
463 u8 err;
464
465 ap->ops->dev_select(ap, device);
466
467 memset(&tf, 0, sizeof(tf));
468
469 ap->ops->tf_read(ap, &tf);
470 err = tf.feature;
471 if (r_err)
472 *r_err = err;
473
474 /* see if device passed diags */
475 if (err == 1)
476 /* do nothing */ ;
477 else if ((device == 0) && (err == 0x81))
478 /* do nothing */ ;
479 else
480 return ATA_DEV_NONE;
481
482 /* determine if device is ATA or ATAPI */
483 class = ata_dev_classify(&tf);
484
485 if (class == ATA_DEV_UNKNOWN)
486 return ATA_DEV_NONE;
487 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
488 return ATA_DEV_NONE;
489 return class;
490 }
491
492 /**
493 * ata_id_string - Convert IDENTIFY DEVICE page into string
494 * @id: IDENTIFY DEVICE results we will examine
495 * @s: string into which data is output
496 * @ofs: offset into identify device page
497 * @len: length of string to return. must be an even number.
498 *
499 * The strings in the IDENTIFY DEVICE page are broken up into
500 * 16-bit chunks. Run through the string, and output each
501 * 8-bit chunk linearly, regardless of platform.
502 *
503 * LOCKING:
504 * caller.
505 */
506
507 void ata_id_string(const u16 *id, unsigned char *s,
508 unsigned int ofs, unsigned int len)
509 {
510 unsigned int c;
511
512 while (len > 0) {
513 c = id[ofs] >> 8;
514 *s = c;
515 s++;
516
517 c = id[ofs] & 0xff;
518 *s = c;
519 s++;
520
521 ofs++;
522 len -= 2;
523 }
524 }
525
526 /**
527 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
528 * @id: IDENTIFY DEVICE results we will examine
529 * @s: string into which data is output
530 * @ofs: offset into identify device page
531 * @len: length of string to return. must be an odd number.
532 *
533 * This function is identical to ata_id_string except that it
534 * trims trailing spaces and terminates the resulting string with
535 * null. @len must be actual maximum length (even number) + 1.
536 *
537 * LOCKING:
538 * caller.
539 */
540 void ata_id_c_string(const u16 *id, unsigned char *s,
541 unsigned int ofs, unsigned int len)
542 {
543 unsigned char *p;
544
545 WARN_ON(!(len & 1));
546
547 ata_id_string(id, s, ofs, len - 1);
548
549 p = s + strnlen(s, len - 1);
550 while (p > s && p[-1] == ' ')
551 p--;
552 *p = '\0';
553 }
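/*
 * Usage sketch (illustrative): the model string occupies words 27-46 of
 * the IDENTIFY page (40 characters), so a caller would do something like
 *
 *	unsigned char model[41];
 *	ata_id_c_string(dev->id, model, 27, sizeof(model));
 *
 * Each 16-bit word contributes its high byte first ("WD" is stored as
 * 0x5744), and trailing spaces are trimmed off.
 */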
554
555 static u64 ata_id_n_sectors(const u16 *id)
556 {
557 if (ata_id_has_lba(id)) {
558 if (ata_id_has_lba48(id))
559 return ata_id_u64(id, 100);
560 else
561 return ata_id_u32(id, 60);
562 } else {
563 if (ata_id_current_chs_valid(id))
564 return ata_id_u32(id, 57);
565 else
566 return id[1] * id[3] * id[6];
567 }
568 }
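/*
 * For example (illustrative): an LBA48 drive reports its capacity in
 * words 100-103 (ata_id_u64(id, 100)), an LBA28-only drive in words
 * 60-61, and a legacy CHS drive falls back to cylinders * heads *
 * sectors from words 1, 3 and 6, e.g. 16383 * 16 * 63 == 16514064.
 */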
569
570 /**
571 * ata_noop_dev_select - Select device 0/1 on ATA bus
572 * @ap: ATA channel to manipulate
573 * @device: ATA device (numbered from zero) to select
574 *
575 * This function performs no action; it is a no-op.
576 *
577 * May be used as the dev_select() entry in ata_port_operations.
578 *
579 * LOCKING:
580 * caller.
581 */
582 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
583 {
584 }
585
586
587 /**
588 * ata_std_dev_select - Select device 0/1 on ATA bus
589 * @ap: ATA channel to manipulate
590 * @device: ATA device (numbered from zero) to select
591 *
592 * Use the method defined in the ATA specification to
593 * make either device 0, or device 1, active on the
594 * ATA channel. Works with both PIO and MMIO.
595 *
596 * May be used as the dev_select() entry in ata_port_operations.
597 *
598 * LOCKING:
599 * caller.
600 */
601
602 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
603 {
604 u8 tmp;
605
606 if (device == 0)
607 tmp = ATA_DEVICE_OBS;
608 else
609 tmp = ATA_DEVICE_OBS | ATA_DEV1;
610
611 if (ap->flags & ATA_FLAG_MMIO) {
612 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
613 } else {
614 outb(tmp, ap->ioaddr.device_addr);
615 }
616 ata_pause(ap); /* needed; also flushes, for mmio */
617 }
618
619 /**
620 * ata_dev_select - Select device 0/1 on ATA bus
621 * @ap: ATA channel to manipulate
622 * @device: ATA device (numbered from zero) to select
623 * @wait: non-zero to wait for Status register BSY bit to clear
624 * @can_sleep: non-zero if context allows sleeping
625 *
626 * Use the method defined in the ATA specification to
627 * make either device 0, or device 1, active on the
628 * ATA channel.
629 *
630 * This is a high-level version of ata_std_dev_select(),
631 * which additionally provides the services of inserting
632 * the proper pauses and status polling, where needed.
633 *
634 * LOCKING:
635 * caller.
636 */
637
638 void ata_dev_select(struct ata_port *ap, unsigned int device,
639 unsigned int wait, unsigned int can_sleep)
640 {
641 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
642 ap->id, device, wait);
643
644 if (wait)
645 ata_wait_idle(ap);
646
647 ap->ops->dev_select(ap, device);
648
649 if (wait) {
650 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
651 msleep(150);
652 ata_wait_idle(ap);
653 }
654 }
655
656 /**
657 * ata_dump_id - IDENTIFY DEVICE info debugging output
658 * @id: IDENTIFY DEVICE page to dump
659 *
660 * Dump selected 16-bit words from the given IDENTIFY DEVICE
661 * page.
662 *
663 * LOCKING:
664 * caller.
665 */
666
667 static inline void ata_dump_id(const u16 *id)
668 {
669 DPRINTK("49==0x%04x "
670 "53==0x%04x "
671 "63==0x%04x "
672 "64==0x%04x "
673 "75==0x%04x \n",
674 id[49],
675 id[53],
676 id[63],
677 id[64],
678 id[75]);
679 DPRINTK("80==0x%04x "
680 "81==0x%04x "
681 "82==0x%04x "
682 "83==0x%04x "
683 "84==0x%04x \n",
684 id[80],
685 id[81],
686 id[82],
687 id[83],
688 id[84]);
689 DPRINTK("88==0x%04x "
690 "93==0x%04x\n",
691 id[88],
692 id[93]);
693 }
694
695 /*
696 * Compute the PIO modes available for this device. This is not as
697 * trivial as it seems if we must consider early devices correctly.
698 *
699 * FIXME: pre-IDE drive timing (do we care?).
700 */
701
702 static unsigned int ata_pio_modes(const struct ata_device *adev)
703 {
704 u16 modes;
705
706 /* Usual case. Word 53 indicates word 64 is valid */
707 if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
708 modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
709 modes <<= 3;
710 modes |= 0x7;
711 return modes;
712 }
713
714 /* If word 64 isn't valid then Word 51 high byte holds the PIO timing
715 number for the maximum. Turn it into a mask and return it */
716 modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ;
717 return modes;
718 /* But wait... there's more. Design your standards by committee and
719 you too can get a free iordy field to process. However, it's the
720 speeds, not the modes, that are supported... Note drivers using the
721 timing API will get this right anyway */
722 }
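/*
 * Worked example (illustrative values): with word 53 bit 1 set and
 * word 64 == 0x03 (PIO3 and PIO4 supported), modes becomes
 * (0x03 << 3) | 0x7 == 0x1f, i.e. PIO0-PIO4. For an old drive whose
 * word 51 high byte reports PIO mode 2, the fallback yields
 * (2 << 2) - 1 == 0x07, i.e. PIO0-PIO2.
 */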
723
724 static inline void
725 ata_queue_packet_task(struct ata_port *ap)
726 {
727 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
728 queue_work(ata_wq, &ap->packet_task);
729 }
730
731 static inline void
732 ata_queue_pio_task(struct ata_port *ap)
733 {
734 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
735 queue_work(ata_wq, &ap->pio_task);
736 }
737
738 static inline void
739 ata_queue_delayed_pio_task(struct ata_port *ap, unsigned long delay)
740 {
741 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
742 queue_delayed_work(ata_wq, &ap->pio_task, delay);
743 }
744
745 /**
746 * ata_flush_pio_tasks - Flush pio_task and packet_task
747 * @ap: the target ata_port
748 *
749 * After this function completes, pio_task and packet_task are
750 * guaranteed not to be running or scheduled.
751 *
752 * LOCKING:
753 * Kernel thread context (may sleep)
754 */
755
756 static void ata_flush_pio_tasks(struct ata_port *ap)
757 {
758 int tmp = 0;
759 unsigned long flags;
760
761 DPRINTK("ENTER\n");
762
763 spin_lock_irqsave(&ap->host_set->lock, flags);
764 ap->flags |= ATA_FLAG_FLUSH_PIO_TASK;
765 spin_unlock_irqrestore(&ap->host_set->lock, flags);
766
767 DPRINTK("flush #1\n");
768 flush_workqueue(ata_wq);
769
770 /*
771 * At this point, if a task is running, it's guaranteed to see
772 * the FLUSH flag; thus, it will never queue pio tasks again.
773 * Cancel and flush.
774 */
775 tmp |= cancel_delayed_work(&ap->pio_task);
776 tmp |= cancel_delayed_work(&ap->packet_task);
777 if (!tmp) {
778 DPRINTK("flush #2\n");
779 flush_workqueue(ata_wq);
780 }
781
782 spin_lock_irqsave(&ap->host_set->lock, flags);
783 ap->flags &= ~ATA_FLAG_FLUSH_PIO_TASK;
784 spin_unlock_irqrestore(&ap->host_set->lock, flags);
785
786 DPRINTK("EXIT\n");
787 }
788
789 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
790 {
791 struct completion *waiting = qc->private_data;
792
793 qc->ap->ops->tf_read(qc->ap, &qc->tf);
794 complete(waiting);
795 }
796
797 /**
798 * ata_exec_internal - execute libata internal command
799 * @ap: Port to which the command is sent
800 * @dev: Device to which the command is sent
801 * @tf: Taskfile registers for the command and the result
802 * @dma_dir: Data transfer direction of the command
803 * @buf: Data buffer of the command
804 * @buflen: Length of data buffer
805 *
806 * Executes libata internal command with timeout. @tf contains
807 * command on entry and result on return. Timeout and error
808 * conditions are reported via return value. No recovery action
809 * is taken after a command times out. It's caller's duty to
810 * clean up after timeout.
811 *
812 * LOCKING:
813 * None. Should be called with kernel context, might sleep.
814 */
815
816 static unsigned
817 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
818 struct ata_taskfile *tf,
819 int dma_dir, void *buf, unsigned int buflen)
820 {
821 u8 command = tf->command;
822 struct ata_queued_cmd *qc;
823 DECLARE_COMPLETION(wait);
824 unsigned long flags;
825 unsigned int err_mask;
826
827 spin_lock_irqsave(&ap->host_set->lock, flags);
828
829 qc = ata_qc_new_init(ap, dev);
830 BUG_ON(qc == NULL);
831
832 qc->tf = *tf;
833 qc->dma_dir = dma_dir;
834 if (dma_dir != DMA_NONE) {
835 ata_sg_init_one(qc, buf, buflen);
836 qc->nsect = buflen / ATA_SECT_SIZE;
837 }
838
839 qc->private_data = &wait;
840 qc->complete_fn = ata_qc_complete_internal;
841
842 qc->err_mask = ata_qc_issue(qc);
843 if (qc->err_mask)
844 ata_qc_complete(qc);
845
846 spin_unlock_irqrestore(&ap->host_set->lock, flags);
847
848 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
849 spin_lock_irqsave(&ap->host_set->lock, flags);
850
851 /* We're racing with irq here. If we lose, the
852 * following test prevents us from completing the qc
853 * again. If completion irq occurs after here but
854 * before the caller cleans up, it will result in a
855 * spurious interrupt. We can live with that.
856 */
857 if (qc->flags & ATA_QCFLAG_ACTIVE) {
858 qc->err_mask = AC_ERR_TIMEOUT;
859 ata_qc_complete(qc);
860 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
861 ap->id, command);
862 }
863
864 spin_unlock_irqrestore(&ap->host_set->lock, flags);
865 }
866
867 *tf = qc->tf;
868 err_mask = qc->err_mask;
869
870 ata_qc_free(qc);
871
872 return err_mask;
873 }
874
875 /**
876 * ata_pio_need_iordy - check if iordy needed
877 * @adev: ATA device
878 *
879 * Check if the current speed of the device requires IORDY. Used
880 * by various controllers for chip configuration.
881 */
882
883 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
884 {
885 int pio;
886 int speed = adev->pio_mode - XFER_PIO_0;
887
888 if (speed < 2)
889 return 0;
890 if (speed > 2)
891 return 1;
892
893 /* If we have no drive specific rule, then PIO 2 is non IORDY */
894
895 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
896 pio = adev->id[ATA_ID_EIDE_PIO];
897 /* Is the speed faster than the drive allows non IORDY ? */
898 if (pio) {
899 /* These are cycle times, not frequencies - watch the logic! */
900 if (pio > 240) /* PIO2 is 240 ns per cycle */
901 return 1;
902 return 0;
903 }
904 }
905 return 0;
906 }
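/*
 * For example (illustrative): PIO3 and PIO4 (speed > 2) always need
 * IORDY; a PIO2 drive whose EIDE PIO timing word advertises a minimum
 * cycle time of 383 ns exceeds the 240 ns PIO2 cycle, so IORDY is
 * required for it as well.
 */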
907
908 /**
909 * ata_dev_read_id - Read ID data from the specified device
910 * @ap: port on which target device resides
911 * @dev: target device
912 * @p_class: pointer to class of the target device (may be changed)
913 * @post_reset: is this read ID post-reset?
914 * @id: buffer to fill IDENTIFY page into
915 *
916 * Read ID data from the specified device. ATA_CMD_ID_ATA is
917 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
918 * devices. This function also takes care of EDD signature
919 * misreporting (to be removed once EDD support is gone) and
920 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
921 *
922 * LOCKING:
923 * Kernel thread context (may sleep)
924 *
925 * RETURNS:
926 * 0 on success, -errno otherwise.
927 */
928 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
929 unsigned int *p_class, int post_reset, u16 *id)
930 {
931 unsigned int class = *p_class;
932 unsigned int using_edd;
933 struct ata_taskfile tf;
934 unsigned int err_mask = 0;
935 const char *reason;
936 int rc;
937
938 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
939
940 if (ap->ops->probe_reset ||
941 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
942 using_edd = 0;
943 else
944 using_edd = 1;
945
946 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
947
948 retry:
949 ata_tf_init(ap, &tf, dev->devno);
950
951 switch (class) {
952 case ATA_DEV_ATA:
953 tf.command = ATA_CMD_ID_ATA;
954 break;
955 case ATA_DEV_ATAPI:
956 tf.command = ATA_CMD_ID_ATAPI;
957 break;
958 default:
959 rc = -ENODEV;
960 reason = "unsupported class";
961 goto err_out;
962 }
963
964 tf.protocol = ATA_PROT_PIO;
965
966 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
967 id, sizeof(id[0]) * ATA_ID_WORDS);
968
969 if (err_mask) {
970 rc = -EIO;
971 reason = "I/O error";
972
973 if (err_mask & ~AC_ERR_DEV)
974 goto err_out;
975
976 /*
977 * arg! EDD works for all test cases, but seems to return
978 * the ATA signature for some ATAPI devices. Until the
979 * reason for this is found and fixed, we fix up the mess
980 * here. If IDENTIFY DEVICE returns command aborted
981 * (as ATAPI devices do), then we issue an
982 * IDENTIFY PACKET DEVICE.
983 *
984 * ATA software reset (SRST, the default) does not appear
985 * to have this problem.
986 */
987 if ((using_edd) && (class == ATA_DEV_ATA)) {
988 u8 err = tf.feature;
989 if (err & ATA_ABORTED) {
990 class = ATA_DEV_ATAPI;
991 goto retry;
992 }
993 }
994 goto err_out;
995 }
996
997 swap_buf_le16(id, ATA_ID_WORDS);
998
999 /* print device capabilities */
1000 printk(KERN_DEBUG "ata%u: dev %u cfg "
1001 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1002 ap->id, dev->devno,
1003 id[49], id[82], id[83], id[84], id[85], id[86], id[87], id[88]);
1004
1005 /* sanity check */
1006 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1007 rc = -EINVAL;
1008 reason = "device reports illegal type";
1009 goto err_out;
1010 }
1011
1012 if (post_reset && class == ATA_DEV_ATA) {
1013 /*
1014 * The exact sequence expected by certain pre-ATA4 drives is:
1015 * SRST RESET
1016 * IDENTIFY
1017 * INITIALIZE DEVICE PARAMETERS
1018 * anything else..
1019 * Some drives were very specific about that exact sequence.
1020 */
1021 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1022 err_mask = ata_dev_init_params(ap, dev);
1023 if (err_mask) {
1024 rc = -EIO;
1025 reason = "INIT_DEV_PARAMS failed";
1026 goto err_out;
1027 }
1028
1029 /* current CHS translation info (id[53-58]) might be
1030 * changed. reread the identify device info.
1031 */
1032 post_reset = 0;
1033 goto retry;
1034 }
1035 }
1036
1037 *p_class = class;
1038 return 0;
1039
1040 err_out:
1041 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1042 ap->id, dev->devno, reason);
1043 return rc;
1044 }
1045
1046 /**
1047 * ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
1048 * @ap: port on which device we wish to probe resides
1049 * @device: device bus address, starting at zero
1050 *
1051 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
1052 * command, and read back the 512-byte device information page.
1053 * The device information page is fed to us via the standard
1054 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
1055 * using standard PIO-IN paths)
1056 *
1057 * After reading the device information page, we use several
1058 * bits of information from it to initialize data structures
1059 * that will be used during the lifetime of the ata_device.
1060 * Other data from the info page is used to disqualify certain
1061 * older ATA devices we do not wish to support.
1062 *
1063 * LOCKING:
1064 * Inherited from caller. Some functions called by this function
1065 * obtain the host_set lock.
1066 */
1067
1068 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1069 {
1070 struct ata_device *dev = &ap->device[device];
1071 unsigned long xfer_modes;
1072 int i, rc;
1073
1074 if (!ata_dev_present(dev)) {
1075 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1076 ap->id, device);
1077 return;
1078 }
1079
1080 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1081
1082 rc = ata_dev_read_id(ap, dev, &dev->class, 1, dev->id);
1083 if (rc)
1084 goto err_out;
1085
1086 /*
1087 * common ATA, ATAPI feature tests
1088 */
1089
1090 /* we require DMA support (bit 8 of word 49) */
1091 if (!ata_id_has_dma(dev->id)) {
1092 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1093 goto err_out_nosup;
1094 }
1095
1096 /* quick-n-dirty find max transfer mode; for printk only */
1097 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1098 if (!xfer_modes)
1099 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1100 if (!xfer_modes)
1101 xfer_modes = ata_pio_modes(dev);
1102
1103 ata_dump_id(dev->id);
1104
1105 /* ATA-specific feature tests */
1106 if (dev->class == ATA_DEV_ATA) {
1107 dev->n_sectors = ata_id_n_sectors(dev->id);
1108
1109 if (ata_id_has_lba(dev->id)) {
1110 dev->flags |= ATA_DFLAG_LBA;
1111
1112 if (ata_id_has_lba48(dev->id))
1113 dev->flags |= ATA_DFLAG_LBA48;
1114
1115 /* print device info to dmesg */
1116 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
1117 ap->id, device,
1118 ata_id_major_version(dev->id),
1119 ata_mode_string(xfer_modes),
1120 (unsigned long long)dev->n_sectors,
1121 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
1122 } else {
1123 /* CHS */
1124
1125 /* Default translation */
1126 dev->cylinders = dev->id[1];
1127 dev->heads = dev->id[3];
1128 dev->sectors = dev->id[6];
1129
1130 if (ata_id_current_chs_valid(dev->id)) {
1131 /* Current CHS translation is valid. */
1132 dev->cylinders = dev->id[54];
1133 dev->heads = dev->id[55];
1134 dev->sectors = dev->id[56];
1135 }
1136
1137 /* print device info to dmesg */
1138 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
1139 ap->id, device,
1140 ata_id_major_version(dev->id),
1141 ata_mode_string(xfer_modes),
1142 (unsigned long long)dev->n_sectors,
1143 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
1144
1145 }
1146
1147 dev->cdb_len = 16;
1148 }
1149
1150 /* ATAPI-specific feature tests */
1151 else if (dev->class == ATA_DEV_ATAPI) {
1152 rc = atapi_cdb_len(dev->id);
1153 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1154 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1155 goto err_out_nosup;
1156 }
1157 dev->cdb_len = (unsigned int) rc;
1158
1159 /* print device info to dmesg */
1160 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1161 ap->id, device,
1162 ata_mode_string(xfer_modes));
1163 }
1164
1165 ap->host->max_cmd_len = 0;
1166 for (i = 0; i < ATA_MAX_DEVICES; i++)
1167 ap->host->max_cmd_len = max_t(unsigned int,
1168 ap->host->max_cmd_len,
1169 ap->device[i].cdb_len);
1170
1171 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1172 return;
1173
1174 err_out_nosup:
1175 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1176 ap->id, device);
1177 err_out:
1178 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1179 DPRINTK("EXIT, err\n");
1180 }
1181
1182
1183 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1184 struct ata_device *dev)
1185 {
1186 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1187 }
1188
1189 /**
1190 * ata_dev_config - Run device specific handlers & check for SATA->PATA bridges
1191 * @ap: port on which the device resides
1192 * @i: device index on @ap (starting at zero)
1193 *
1194 * LOCKING:
1195 */
1196
1197 void ata_dev_config(struct ata_port *ap, unsigned int i)
1198 {
1199 /* limit bridge transfers to udma5, 200 sectors */
1200 if (ata_dev_knobble(ap, &ap->device[i])) {
1201 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1202 ap->id, i);
1203 ap->udma_mask &= ATA_UDMA5;
1204 ap->device[i].max_sectors = ATA_MAX_SECTORS;
1205 }
1206
1207 if (ap->ops->dev_config)
1208 ap->ops->dev_config(ap, &ap->device[i]);
1209 }
1210
1211 /**
1212 * ata_bus_probe - Reset and probe ATA bus
1213 * @ap: Bus to probe
1214 *
1215 * Master ATA bus probing function. Initiates a hardware-dependent
1216 * bus reset, then attempts to identify any devices found on
1217 * the bus.
1218 *
1219 * LOCKING:
1220 * PCI/etc. bus probe sem.
1221 *
1222 * RETURNS:
1223 * Zero on success, non-zero on error.
1224 */
1225
1226 static int ata_bus_probe(struct ata_port *ap)
1227 {
1228 unsigned int i, found = 0;
1229
1230 if (ap->ops->probe_reset) {
1231 unsigned int classes[ATA_MAX_DEVICES];
1232 int rc;
1233
1234 ata_port_probe(ap);
1235
1236 rc = ap->ops->probe_reset(ap, classes);
1237 if (rc == 0) {
1238 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1239 if (classes[i] == ATA_DEV_UNKNOWN)
1240 classes[i] = ATA_DEV_NONE;
1241 ap->device[i].class = classes[i];
1242 }
1243 } else {
1244 printk(KERN_ERR "ata%u: probe reset failed, "
1245 "disabling port\n", ap->id);
1246 ata_port_disable(ap);
1247 }
1248 } else
1249 ap->ops->phy_reset(ap);
1250
1251 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1252 goto err_out;
1253
1254 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1255 ata_dev_identify(ap, i);
1256 if (ata_dev_present(&ap->device[i])) {
1257 found = 1;
1258 ata_dev_config(ap,i);
1259 }
1260 }
1261
1262 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1263 goto err_out_disable;
1264
1265 ata_set_mode(ap);
1266 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1267 goto err_out_disable;
1268
1269 return 0;
1270
1271 err_out_disable:
1272 ap->ops->port_disable(ap);
1273 err_out:
1274 return -1;
1275 }
1276
1277 /**
1278 * ata_port_probe - Mark port as enabled
1279 * @ap: Port for which we indicate enablement
1280 *
1281 * Modify @ap data structure such that the system
1282 * thinks that the entire port is enabled.
1283 *
1284 * LOCKING: host_set lock, or some other form of
1285 * serialization.
1286 */
1287
1288 void ata_port_probe(struct ata_port *ap)
1289 {
1290 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1291 }
1292
1293 /**
1294 * sata_print_link_status - Print SATA link status
1295 * @ap: SATA port to printk link status about
1296 *
1297 * This function prints link speed and status of a SATA link.
1298 *
1299 * LOCKING:
1300 * None.
1301 */
1302 static void sata_print_link_status(struct ata_port *ap)
1303 {
1304 u32 sstatus, tmp;
1305 const char *speed;
1306
1307 if (!ap->ops->scr_read)
1308 return;
1309
1310 sstatus = scr_read(ap, SCR_STATUS);
1311
1312 if (sata_dev_present(ap)) {
1313 tmp = (sstatus >> 4) & 0xf;
1314 if (tmp & (1 << 0))
1315 speed = "1.5";
1316 else if (tmp & (1 << 1))
1317 speed = "3.0";
1318 else
1319 speed = "<unknown>";
1320 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1321 ap->id, speed, sstatus);
1322 } else {
1323 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1324 ap->id, sstatus);
1325 }
1326 }
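/*
 * For example (illustrative): an SStatus value of 0x113 decodes as
 * DET == 3 (device present, phy communication established) and
 * SPD == 1, producing "SATA link up 1.5 Gbps (SStatus 113)"; with no
 * device attached the DET field is not 3 and the link is reported down.
 */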
1327
1328 /**
1329 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1330 * @ap: SATA port associated with target SATA PHY.
1331 *
1332 * This function issues commands to standard SATA Sxxx
1333 * PHY registers, to wake up the phy (and device), and
1334 * clear any reset condition.
1335 *
1336 * LOCKING:
1337 * PCI/etc. bus probe sem.
1338 *
1339 */
1340 void __sata_phy_reset(struct ata_port *ap)
1341 {
1342 u32 sstatus;
1343 unsigned long timeout = jiffies + (HZ * 5);
1344
1345 if (ap->flags & ATA_FLAG_SATA_RESET) {
1346 /* issue phy wake/reset */
1347 scr_write_flush(ap, SCR_CONTROL, 0x301);
1348 /* Couldn't find anything in SATA I/II specs, but
1349 * AHCI-1.1 10.4.2 says at least 1 ms. */
1350 mdelay(1);
1351 }
1352 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1353
1354 /* wait for phy to become ready, if necessary */
1355 do {
1356 msleep(200);
1357 sstatus = scr_read(ap, SCR_STATUS);
1358 if ((sstatus & 0xf) != 1)
1359 break;
1360 } while (time_before(jiffies, timeout));
1361
1362 /* print link status */
1363 sata_print_link_status(ap);
1364
1365 /* TODO: phy layer with polling, timeouts, etc. */
1366 if (sata_dev_present(ap))
1367 ata_port_probe(ap);
1368 else
1369 ata_port_disable(ap);
1370
1371 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1372 return;
1373
1374 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1375 ata_port_disable(ap);
1376 return;
1377 }
1378
1379 ap->cbl = ATA_CBL_SATA;
1380 }
1381
1382 /**
1383 * sata_phy_reset - Reset SATA bus.
1384 * @ap: SATA port associated with target SATA PHY.
1385 *
1386 * This function resets the SATA bus, and then probes
1387 * the bus for devices.
1388 *
1389 * LOCKING:
1390 * PCI/etc. bus probe sem.
1391 *
1392 */
1393 void sata_phy_reset(struct ata_port *ap)
1394 {
1395 __sata_phy_reset(ap);
1396 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1397 return;
1398 ata_bus_reset(ap);
1399 }
1400
1401 /**
1402 * ata_port_disable - Disable port.
1403 * @ap: Port to be disabled.
1404 *
1405 * Modify @ap data structure such that the system
1406 * thinks that the entire port is disabled, and should
1407 * never attempt to probe or communicate with devices
1408 * on this port.
1409 *
1410 * LOCKING: host_set lock, or some other form of
1411 * serialization.
1412 */
1413
1414 void ata_port_disable(struct ata_port *ap)
1415 {
1416 ap->device[0].class = ATA_DEV_NONE;
1417 ap->device[1].class = ATA_DEV_NONE;
1418 ap->flags |= ATA_FLAG_PORT_DISABLED;
1419 }
1420
1421 /*
1422 * This mode timing computation functionality is ported over from
1423 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1424 */
1425 /*
1426 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1427 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1428 * for PIO 5, which is a nonstandard extension and UDMA6, which
1429 * is currently supported only by Maxtor drives.
1430 */
1431
1432 static const struct ata_timing ata_timing[] = {
1433
1434 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1435 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1436 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1437 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1438
1439 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1440 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1441 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1442
1443 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1444
1445 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1446 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1447 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1448
1449 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1450 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1451 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1452
1453 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1454 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1455 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1456
1457 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1458 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1459 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1460
1461 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1462
1463 { 0xFF }
1464 };
1465
1466 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1467 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1468
1469 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1470 {
1471 q->setup = EZ(t->setup * 1000, T);
1472 q->act8b = EZ(t->act8b * 1000, T);
1473 q->rec8b = EZ(t->rec8b * 1000, T);
1474 q->cyc8b = EZ(t->cyc8b * 1000, T);
1475 q->active = EZ(t->active * 1000, T);
1476 q->recover = EZ(t->recover * 1000, T);
1477 q->cycle = EZ(t->cycle * 1000, T);
1478 q->udma = EZ(t->udma * 1000, UT);
1479 }
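/*
 * Worked example (illustrative, assuming T is expressed in the same
 * units as t->xxx * 1000, e.g. T == 30000 for a 30 ns bus clock):
 * the 120 ns PIO4 cycle quantizes to ENOUGH(120 * 1000, 30000) == 4
 * clocks, while EZ() maps the zero fields in the table straight to
 * zero instead of rounding them up to 1.
 */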
1480
1481 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1482 struct ata_timing *m, unsigned int what)
1483 {
1484 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1485 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1486 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1487 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1488 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1489 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1490 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1491 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1492 }
1493
1494 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1495 {
1496 const struct ata_timing *t;
1497
1498 for (t = ata_timing; t->mode != speed; t++)
1499 if (t->mode == 0xFF)
1500 return NULL;
1501 return t;
1502 }
1503
1504 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1505 struct ata_timing *t, int T, int UT)
1506 {
1507 const struct ata_timing *s;
1508 struct ata_timing p;
1509
1510 /*
1511 * Find the mode.
1512 */
1513
1514 if (!(s = ata_timing_find_mode(speed)))
1515 return -EINVAL;
1516
1517 memcpy(t, s, sizeof(*s));
1518
1519 /*
1520 * If the drive is an EIDE drive, it can tell us it needs extended
1521 * PIO/MW_DMA cycle timing.
1522 */
1523
1524 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1525 memset(&p, 0, sizeof(p));
1526 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1527 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1528 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1529 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1530 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1531 }
1532 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1533 }
1534
1535 /*
1536 * Convert the timing to bus clock counts.
1537 */
1538
1539 ata_timing_quantize(t, t, T, UT);
1540
1541 /*
1542 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1543 * S.M.A.R.T. and some other commands. We have to ensure that the
1544 * DMA cycle timing is slower than or equal to the fastest PIO timing.
1545 */
1546
1547 if (speed > XFER_PIO_4) {
1548 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1549 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1550 }
1551
1552 /*
1553 * Lengthen active & recovery time so that cycle time is correct.
1554 */
1555
1556 if (t->act8b + t->rec8b < t->cyc8b) {
1557 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1558 t->rec8b = t->cyc8b - t->act8b;
1559 }
1560
1561 if (t->active + t->recover < t->cycle) {
1562 t->active += (t->cycle - (t->active + t->recover)) / 2;
1563 t->recover = t->cycle - t->active;
1564 }
1565
1566 return 0;
1567 }
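/*
 * For example (illustrative values): if quantization leaves act8b == 3,
 * rec8b == 1 and cyc8b == 6 clocks, the lengthening step above adds
 * (6 - 4) / 2 == 1 clock to act8b and sets rec8b = 6 - 4 == 2, so that
 * active + recovery again adds up to the 8-bit cycle time.
 */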
1568
1569 static const struct {
1570 unsigned int shift;
1571 u8 base;
1572 } xfer_mode_classes[] = {
1573 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1574 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1575 { ATA_SHIFT_PIO, XFER_PIO_0 },
1576 };
1577
1578 static u8 base_from_shift(unsigned int shift)
1579 {
1580 int i;
1581
1582 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1583 if (xfer_mode_classes[i].shift == shift)
1584 return xfer_mode_classes[i].base;
1585
1586 return 0xff;
1587 }
1588
1589 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1590 {
1591 int ofs, idx;
1592 u8 base;
1593
1594 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1595 return;
1596
1597 if (dev->xfer_shift == ATA_SHIFT_PIO)
1598 dev->flags |= ATA_DFLAG_PIO;
1599
1600 ata_dev_set_xfermode(ap, dev);
1601
1602 base = base_from_shift(dev->xfer_shift);
1603 ofs = dev->xfer_mode - base;
1604 idx = ofs + dev->xfer_shift;
1605 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1606
1607 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1608 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1609
1610 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1611 ap->id, dev->devno, xfer_mode_str[idx]);
1612 }
1613
1614 static int ata_host_set_pio(struct ata_port *ap)
1615 {
1616 unsigned int mask;
1617 int x, i;
1618 u8 base, xfer_mode;
1619
1620 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1621 x = fgb(mask);
1622 if (x < 0) {
1623 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1624 return -1;
1625 }
1626
1627 base = base_from_shift(ATA_SHIFT_PIO);
1628 xfer_mode = base + x;
1629
1630 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1631 (int)base, (int)xfer_mode, mask, x);
1632
1633 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1634 struct ata_device *dev = &ap->device[i];
1635 if (ata_dev_present(dev)) {
1636 dev->pio_mode = xfer_mode;
1637 dev->xfer_mode = xfer_mode;
1638 dev->xfer_shift = ATA_SHIFT_PIO;
1639 if (ap->ops->set_piomode)
1640 ap->ops->set_piomode(ap, dev);
1641 }
1642 }
1643
1644 return 0;
1645 }
1646
1647 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1648 unsigned int xfer_shift)
1649 {
1650 int i;
1651
1652 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1653 struct ata_device *dev = &ap->device[i];
1654 if (ata_dev_present(dev)) {
1655 dev->dma_mode = xfer_mode;
1656 dev->xfer_mode = xfer_mode;
1657 dev->xfer_shift = xfer_shift;
1658 if (ap->ops->set_dmamode)
1659 ap->ops->set_dmamode(ap, dev);
1660 }
1661 }
1662 }
1663
1664 /**
1665 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1666 * @ap: port on which timings will be programmed
1667 *
1668 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1669 *
1670 * LOCKING:
1671 * PCI/etc. bus probe sem.
1672 */
1673 static void ata_set_mode(struct ata_port *ap)
1674 {
1675 unsigned int xfer_shift;
1676 u8 xfer_mode;
1677 int rc;
1678
1679 /* step 1: always set host PIO timings */
1680 rc = ata_host_set_pio(ap);
1681 if (rc)
1682 goto err_out;
1683
1684 /* step 2: choose the best data xfer mode */
1685 xfer_mode = xfer_shift = 0;
1686 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1687 if (rc)
1688 goto err_out;
1689
1690 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1691 if (xfer_shift != ATA_SHIFT_PIO)
1692 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1693
1694 /* step 4: update devices' xfer mode */
1695 ata_dev_set_mode(ap, &ap->device[0]);
1696 ata_dev_set_mode(ap, &ap->device[1]);
1697
1698 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1699 return;
1700
1701 if (ap->ops->post_set_mode)
1702 ap->ops->post_set_mode(ap);
1703
1704 return;
1705
1706 err_out:
1707 ata_port_disable(ap);
1708 }
1709
1710 /**
1711 * ata_tf_to_host - issue ATA taskfile to host controller
1712 * @ap: port to which command is being issued
1713 * @tf: ATA taskfile register set
1714 *
1715 * Issues ATA taskfile register set to ATA host controller,
1716 * with proper synchronization with interrupt handler and
1717 * other threads.
1718 *
1719 * LOCKING:
1720 * spin_lock_irqsave(host_set lock)
1721 */
1722
1723 static inline void ata_tf_to_host(struct ata_port *ap,
1724 const struct ata_taskfile *tf)
1725 {
1726 ap->ops->tf_load(ap, tf);
1727 ap->ops->exec_command(ap, tf);
1728 }
1729
1730 /**
1731 * ata_busy_sleep - sleep until BSY clears, or timeout
1732 * @ap: port containing status register to be polled
1733 * @tmout_pat: impatience timeout
1734 * @tmout: overall timeout
1735 *
1736 * Sleep until ATA Status register bit BSY clears,
1737 * or a timeout occurs.
1738 *
1739 * LOCKING: None.
1740 */
1741
1742 unsigned int ata_busy_sleep (struct ata_port *ap,
1743 unsigned long tmout_pat, unsigned long tmout)
1744 {
1745 unsigned long timer_start, timeout;
1746 u8 status;
1747
1748 status = ata_busy_wait(ap, ATA_BUSY, 300);
1749 timer_start = jiffies;
1750 timeout = timer_start + tmout_pat;
1751 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1752 msleep(50);
1753 status = ata_busy_wait(ap, ATA_BUSY, 3);
1754 }
1755
1756 if (status & ATA_BUSY)
1757 printk(KERN_WARNING "ata%u is slow to respond, "
1758 "please be patient\n", ap->id);
1759
1760 timeout = timer_start + tmout;
1761 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1762 msleep(50);
1763 status = ata_chk_status(ap);
1764 }
1765
1766 if (status & ATA_BUSY) {
1767 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1768 ap->id, tmout / HZ);
1769 return 1;
1770 }
1771
1772 return 0;
1773 }
1774
1775 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1776 {
1777 struct ata_ioports *ioaddr = &ap->ioaddr;
1778 unsigned int dev0 = devmask & (1 << 0);
1779 unsigned int dev1 = devmask & (1 << 1);
1780 unsigned long timeout;
1781
1782 /* if device 0 was found in ata_devchk, wait for its
1783 * BSY bit to clear
1784 */
1785 if (dev0)
1786 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1787
1788 /* if device 1 was found in ata_devchk, wait for
1789 * register access, then wait for BSY to clear
1790 */
1791 timeout = jiffies + ATA_TMOUT_BOOT;
1792 while (dev1) {
1793 u8 nsect, lbal;
1794
1795 ap->ops->dev_select(ap, 1);
1796 if (ap->flags & ATA_FLAG_MMIO) {
1797 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1798 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1799 } else {
1800 nsect = inb(ioaddr->nsect_addr);
1801 lbal = inb(ioaddr->lbal_addr);
1802 }
1803 if ((nsect == 1) && (lbal == 1))
1804 break;
1805 if (time_after(jiffies, timeout)) {
1806 dev1 = 0;
1807 break;
1808 }
1809 msleep(50); /* give drive a breather */
1810 }
1811 if (dev1)
1812 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1813
1814 /* is all this really necessary? */
1815 ap->ops->dev_select(ap, 0);
1816 if (dev1)
1817 ap->ops->dev_select(ap, 1);
1818 if (dev0)
1819 ap->ops->dev_select(ap, 0);
1820 }
1821
1822 /**
1823 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1824 * @ap: Port to reset and probe
1825 *
1826 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1827 * probe the bus. Not often used these days.
1828 *
1829 * LOCKING:
1830 * PCI/etc. bus probe sem.
1831 * Obtains host_set lock.
1832 *
1833 */
1834
1835 static unsigned int ata_bus_edd(struct ata_port *ap)
1836 {
1837 struct ata_taskfile tf;
1838 unsigned long flags;
1839
1840 /* set up execute-device-diag (bus reset) taskfile */
1841 /* also, take interrupts to a known state (disabled) */
1842 DPRINTK("execute-device-diag\n");
1843 ata_tf_init(ap, &tf, 0);
1844 tf.ctl |= ATA_NIEN;
1845 tf.command = ATA_CMD_EDD;
1846 tf.protocol = ATA_PROT_NODATA;
1847
1848 /* do bus reset */
1849 spin_lock_irqsave(&ap->host_set->lock, flags);
1850 ata_tf_to_host(ap, &tf);
1851 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1852
1853 /* spec says at least 2 ms, but who knows with those
1854 * crazy ATAPI devices...
1855 */
1856 msleep(150);
1857
1858 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1859 }
1860
1861 static unsigned int ata_bus_softreset(struct ata_port *ap,
1862 unsigned int devmask)
1863 {
1864 struct ata_ioports *ioaddr = &ap->ioaddr;
1865
1866 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1867
1868 /* software reset. causes dev0 to be selected */
1869 if (ap->flags & ATA_FLAG_MMIO) {
1870 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1871 udelay(20); /* FIXME: flush */
1872 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1873 udelay(20); /* FIXME: flush */
1874 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1875 } else {
1876 outb(ap->ctl, ioaddr->ctl_addr);
1877 udelay(10);
1878 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1879 udelay(10);
1880 outb(ap->ctl, ioaddr->ctl_addr);
1881 }
1882
1883 /* spec mandates ">= 2ms" before checking status.
1884 * We wait 150ms, because that was the magic delay used for
1885 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1886 * between when the ATA command register is written, and then
1887 * status is checked. Because waiting for "a while" before
1888 * checking status is fine, post SRST, we perform this magic
1889 * delay here as well.
1890 */
1891 msleep(150);
1892
1893 ata_bus_post_reset(ap, devmask);
1894
1895 return 0;
1896 }
1897
1898 /**
1899 * ata_bus_reset - reset host port and associated ATA channel
1900 * @ap: port to reset
1901 *
1902 * This is typically the first time we actually start issuing
1903 * commands to the ATA channel. We wait for BSY to clear, then
1904 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
1905 * result. Determine what devices, if any, are on the channel
1906 * by looking at the device 0/1 error register. Look at the signature
1907 * stored in each device's taskfile registers, to determine if
1908 * the device is ATA or ATAPI.
1909 *
1910 * LOCKING:
1911 * PCI/etc. bus probe sem.
1912 * Obtains host_set lock.
1913 *
1914 * SIDE EFFECTS:
1915 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
1916 */
1917
1918 void ata_bus_reset(struct ata_port *ap)
1919 {
1920 struct ata_ioports *ioaddr = &ap->ioaddr;
1921 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1922 u8 err;
1923 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
1924
1925 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
1926
1927 /* determine if device 0/1 are present */
1928 if (ap->flags & ATA_FLAG_SATA_RESET)
1929 dev0 = 1;
1930 else {
1931 dev0 = ata_devchk(ap, 0);
1932 if (slave_possible)
1933 dev1 = ata_devchk(ap, 1);
1934 }
1935
1936 if (dev0)
1937 devmask |= (1 << 0);
1938 if (dev1)
1939 devmask |= (1 << 1);
1940
1941 /* select device 0 again */
1942 ap->ops->dev_select(ap, 0);
1943
1944 /* issue bus reset */
1945 if (ap->flags & ATA_FLAG_SRST)
1946 rc = ata_bus_softreset(ap, devmask);
1947 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
1948 /* set up device control */
1949 if (ap->flags & ATA_FLAG_MMIO)
1950 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1951 else
1952 outb(ap->ctl, ioaddr->ctl_addr);
1953 rc = ata_bus_edd(ap);
1954 }
1955
1956 if (rc)
1957 goto err_out;
1958
1959 /*
1960 * determine by signature whether we have ATA or ATAPI devices
1961 */
1962 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1963 if ((slave_possible) && (err != 0x81))
1964 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1965
1966 /* re-enable interrupts */
1967 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
1968 ata_irq_on(ap);
1969
1970 /* is double-select really necessary? */
1971 if (ap->device[1].class != ATA_DEV_NONE)
1972 ap->ops->dev_select(ap, 1);
1973 if (ap->device[0].class != ATA_DEV_NONE)
1974 ap->ops->dev_select(ap, 0);
1975
1976 /* if no devices were detected, disable this port */
1977 if ((ap->device[0].class == ATA_DEV_NONE) &&
1978 (ap->device[1].class == ATA_DEV_NONE))
1979 goto err_out;
1980
1981 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
1982 /* set up device control for ATA_FLAG_SATA_RESET */
1983 if (ap->flags & ATA_FLAG_MMIO)
1984 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1985 else
1986 outb(ap->ctl, ioaddr->ctl_addr);
1987 }
1988
1989 DPRINTK("EXIT\n");
1990 return;
1991
1992 err_out:
1993 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
1994 ap->ops->port_disable(ap);
1995
1996 DPRINTK("EXIT\n");
1997 }
1998
1999 static int sata_phy_resume(struct ata_port *ap)
2000 {
2001 unsigned long timeout = jiffies + (HZ * 5);
2002 u32 sstatus;
2003
2004 scr_write_flush(ap, SCR_CONTROL, 0x300);
2005
2006 /* Wait for phy to become ready, if necessary. */
2007 do {
2008 msleep(200);
2009 sstatus = scr_read(ap, SCR_STATUS);
2010 if ((sstatus & 0xf) != 1)
2011 return 0;
2012 } while (time_before(jiffies, timeout));
2013
2014 return -1;
2015 }
2016
2017 /**
2018 * ata_std_probeinit - initialize probing
2019 * @ap: port to be probed
2020 *
2021 * @ap is about to be probed. Initialize it. This function is
2022 * to be used as standard callback for ata_drive_probe_reset().
2023 *
2024 * NOTE!!! Do not use this function as probeinit if a low level
2025 * driver implements only hardreset. Just pass NULL as probeinit
2026 * in that case. Using this function is probably okay, but doing
2027 * so makes the reset sequence different from the original
2028 * ->phy_reset implementation and makes Jeff nervous. :-P
2029 */
2030 void ata_std_probeinit(struct ata_port *ap)
2031 {
2032 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2033 sata_phy_resume(ap);
2034 if (sata_dev_present(ap))
2035 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2036 }
2037 }
2038
2039 /**
2040 * ata_std_softreset - reset host port via ATA SRST
2041 * @ap: port to reset
2042 * @verbose: fail verbosely
2043 * @classes: resulting classes of attached devices
2044 *
2045 * Reset host port using ATA SRST. This function is to be used
2046 * as standard callback for ata_drive_*_reset() functions.
2047 *
2048 * LOCKING:
2049 * Kernel thread context (may sleep)
2050 *
2051 * RETURNS:
2052 * 0 on success, -errno otherwise.
2053 */
2054 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2055 {
2056 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2057 unsigned int devmask = 0, err_mask;
2058 u8 err;
2059
2060 DPRINTK("ENTER\n");
2061
2062 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2063 classes[0] = ATA_DEV_NONE;
2064 goto out;
2065 }
2066
2067 /* determine if device 0/1 are present */
2068 if (ata_devchk(ap, 0))
2069 devmask |= (1 << 0);
2070 if (slave_possible && ata_devchk(ap, 1))
2071 devmask |= (1 << 1);
2072
2073 /* select device 0 again */
2074 ap->ops->dev_select(ap, 0);
2075
2076 /* issue bus reset */
2077 DPRINTK("about to softreset, devmask=%x\n", devmask);
2078 err_mask = ata_bus_softreset(ap, devmask);
2079 if (err_mask) {
2080 if (verbose)
2081 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2082 ap->id, err_mask);
2083 else
2084 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2085 err_mask);
2086 return -EIO;
2087 }
2088
2089 /* determine by signature whether we have ATA or ATAPI devices */
2090 classes[0] = ata_dev_try_classify(ap, 0, &err);
2091 if (slave_possible && err != 0x81)
2092 classes[1] = ata_dev_try_classify(ap, 1, &err);
2093
2094 out:
2095 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2096 return 0;
2097 }
2098
2099 /**
2100 * sata_std_hardreset - reset host port via SATA phy reset
2101 * @ap: port to reset
2102 * @verbose: fail verbosely
2103 * @class: resulting class of attached device
2104 *
2105 * SATA phy-reset host port using DET bits of SControl register.
2106 * This function is to be used as standard callback for
2107 * ata_drive_*_reset().
2108 *
2109 * LOCKING:
2110 * Kernel thread context (may sleep)
2111 *
2112 * RETURNS:
2113 * 0 on success, -errno otherwise.
2114 */
2115 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2116 {
2117 DPRINTK("ENTER\n");
2118
2119 /* Issue phy wake/reset */
2120 scr_write_flush(ap, SCR_CONTROL, 0x301);
2121
2122 /*
2123 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2124 * 10.4.2 says at least 1 ms.
2125 */
2126 msleep(1);
2127
2128 /* Bring phy back */
2129 sata_phy_resume(ap);
2130
2131 /* TODO: phy layer with polling, timeouts, etc. */
2132 if (!sata_dev_present(ap)) {
2133 *class = ATA_DEV_NONE;
2134 DPRINTK("EXIT, link offline\n");
2135 return 0;
2136 }
2137
2138 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2139 if (verbose)
2140 printk(KERN_ERR "ata%u: COMRESET failed "
2141 "(device not ready)\n", ap->id);
2142 else
2143 DPRINTK("EXIT, device not ready\n");
2144 return -EIO;
2145 }
2146
2147 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2148
2149 *class = ata_dev_try_classify(ap, 0, NULL);
2150
2151 DPRINTK("EXIT, class=%u\n", *class);
2152 return 0;
2153 }
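
/*
 * For reference, the SControl writes used by the hardreset/resume pair
 * decode as follows (standard SATA SCR field layout; the values are the
 * ones used in the code above):
 *
 *	0x301 = IPM 0x3 (no partial/slumber) | SPD 0x0 | DET 0x1
 *	        (assert COMRESET / begin interface initialization)
 *	0x300 = same IPM/SPD with DET 0x0 (release reset;
 *	        sata_phy_resume() then polls SStatus until DET leaves 0x1,
 *	        "device detected but phy communication not established")
 */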
2154
2155 /**
2156 * ata_std_postreset - standard postreset callback
2157 * @ap: the target ata_port
2158 * @classes: classes of attached devices
2159 *
2160 * This function is invoked after a successful reset. Note that
2161 * the device might have been reset more than once using
2162 * different reset methods before postreset is invoked.
2163 *
2164 * This function is to be used as standard callback for
2165 * ata_drive_*_reset().
2166 *
2167 * LOCKING:
2168 * Kernel thread context (may sleep)
2169 */
2170 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2171 {
2172 DPRINTK("ENTER\n");
2173
2174 /* set cable type if it isn't already set */
2175 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2176 ap->cbl = ATA_CBL_SATA;
2177
2178 /* print link status */
2179 if (ap->cbl == ATA_CBL_SATA)
2180 sata_print_link_status(ap);
2181
2182 /* re-enable interrupts */
2183 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2184 ata_irq_on(ap);
2185
2186 /* is double-select really necessary? */
2187 if (classes[0] != ATA_DEV_NONE)
2188 ap->ops->dev_select(ap, 1);
2189 if (classes[1] != ATA_DEV_NONE)
2190 ap->ops->dev_select(ap, 0);
2191
2192 /* bail out if no device is present */
2193 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2194 DPRINTK("EXIT, no device\n");
2195 return;
2196 }
2197
2198 /* set up device control */
2199 if (ap->ioaddr.ctl_addr) {
2200 if (ap->flags & ATA_FLAG_MMIO)
2201 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2202 else
2203 outb(ap->ctl, ap->ioaddr.ctl_addr);
2204 }
2205
2206 DPRINTK("EXIT\n");
2207 }
2208
2209 /**
2210 * ata_std_probe_reset - standard probe reset method
2211 * @ap: port to perform probe-reset on
2212 * @classes: resulting classes of attached devices
2213 *
2214 * The stock off-the-shelf ->probe_reset method.
2215 *
2216 * LOCKING:
2217 * Kernel thread context (may sleep)
2218 *
2219 * RETURNS:
2220 * 0 on success, -errno otherwise.
2221 */
2222 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2223 {
2224 ata_reset_fn_t hardreset;
2225
2226 hardreset = NULL;
2227 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2228 hardreset = sata_std_hardreset;
2229
2230 return ata_drive_probe_reset(ap, ata_std_probeinit,
2231 ata_std_softreset, hardreset,
2232 ata_std_postreset, classes);
2233 }
2234
2235 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2236 ata_postreset_fn_t postreset,
2237 unsigned int *classes)
2238 {
2239 int i, rc;
2240
2241 for (i = 0; i < ATA_MAX_DEVICES; i++)
2242 classes[i] = ATA_DEV_UNKNOWN;
2243
2244 rc = reset(ap, 0, classes);
2245 if (rc)
2246 return rc;
2247
2248 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2249 * is complete and convert all ATA_DEV_UNKNOWN to
2250 * ATA_DEV_NONE.
2251 */
2252 for (i = 0; i < ATA_MAX_DEVICES; i++)
2253 if (classes[i] != ATA_DEV_UNKNOWN)
2254 break;
2255
2256 if (i < ATA_MAX_DEVICES)
2257 for (i = 0; i < ATA_MAX_DEVICES; i++)
2258 if (classes[i] == ATA_DEV_UNKNOWN)
2259 classes[i] = ATA_DEV_NONE;
2260
2261 if (postreset)
2262 postreset(ap, classes);
2263
2264 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2265 }
2266
2267 /**
2268 * ata_drive_probe_reset - Perform probe reset with given methods
2269 * @ap: port to reset
2270 * @probeinit: probeinit method (can be NULL)
2271 * @softreset: softreset method (can be NULL)
2272 * @hardreset: hardreset method (can be NULL)
2273 * @postreset: postreset method (can be NULL)
2274 * @classes: resulting classes of attached devices
2275 *
2276 * Reset the specified port and classify attached devices using
2277 * given methods. This function prefers softreset but tries all
2278 * possible reset sequences to reset and classify devices. This
2279 * function is intended to be used for constructing ->probe_reset
2280 * callback by low level drivers.
2281 *
2282 * Reset methods should follow the following rules.
2283 *
2284 * - Return 0 on success, -errno on failure.
2285 * - If classification is supported, fill classes[] with
2286 * recognized class codes.
2287 * - If classification is not supported, leave classes[] alone.
2288 * - If verbose is non-zero, print error message on failure;
2289 * otherwise, shut up.
2290 *
2291 * LOCKING:
2292 * Kernel thread context (may sleep)
2293 *
2294 * RETURNS:
2295 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2296 * if classification fails, and any error code from reset
2297 * methods.
2298 */
2299 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2300 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2301 ata_postreset_fn_t postreset, unsigned int *classes)
2302 {
2303 int rc = -EINVAL;
2304
2305 if (probeinit)
2306 probeinit(ap);
2307
2308 if (softreset) {
2309 rc = do_probe_reset(ap, softreset, postreset, classes);
2310 if (rc == 0)
2311 return 0;
2312 }
2313
2314 if (!hardreset)
2315 return rc;
2316
2317 rc = do_probe_reset(ap, hardreset, postreset, classes);
2318 if (rc == 0 || rc != -ENODEV)
2319 return rc;
2320
2321 if (softreset)
2322 rc = do_probe_reset(ap, softreset, postreset, classes);
2323
2324 return rc;
2325 }
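
/*
 * Usage sketch for a low-level driver's ->probe_reset, with a
 * hypothetical controller-specific hardreset ("foo_hardreset" is not a
 * real function; only the call pattern mirrors ata_std_probe_reset()
 * above):
 *
 *	static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, ata_std_probeinit,
 *					     ata_std_softreset, foo_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 */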
2326
2327 static void ata_pr_blacklisted(const struct ata_port *ap,
2328 const struct ata_device *dev)
2329 {
2330 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
2331 ap->id, dev->devno);
2332 }
2333
2334 static const char * const ata_dma_blacklist [] = {
2335 "WDC AC11000H",
2336 "WDC AC22100H",
2337 "WDC AC32500H",
2338 "WDC AC33100H",
2339 "WDC AC31600H",
2340 "WDC AC32100H",
2341 "WDC AC23200L",
2342 "Compaq CRD-8241B",
2343 "CRD-8400B",
2344 "CRD-8480B",
2345 "CRD-8482B",
2346 "CRD-84",
2347 "SanDisk SDP3B",
2348 "SanDisk SDP3B-64",
2349 "SANYO CD-ROM CRD",
2350 "HITACHI CDR-8",
2351 "HITACHI CDR-8335",
2352 "HITACHI CDR-8435",
2353 "Toshiba CD-ROM XM-6202B",
2354 "TOSHIBA CD-ROM XM-1702BC",
2355 "CD-532E-A",
2356 "E-IDE CD-ROM CR-840",
2357 "CD-ROM Drive/F5A",
2358 "WPI CDD-820",
2359 "SAMSUNG CD-ROM SC-148C",
2360 "SAMSUNG CD-ROM SC",
2361 "SanDisk SDP3B-64",
2362 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
2363 "_NEC DV5800A",
2364 };
2365
2366 static int ata_dma_blacklisted(const struct ata_device *dev)
2367 {
2368 unsigned char model_num[41];
2369 int i;
2370
2371 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
2372
2373 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2374 if (!strcmp(ata_dma_blacklist[i], model_num))
2375 return 1;
2376
2377 return 0;
2378 }
2379
2380 static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2381 {
2382 const struct ata_device *master, *slave;
2383 unsigned int mask;
2384
2385 master = &ap->device[0];
2386 slave = &ap->device[1];
2387
2388 WARN_ON(!ata_dev_present(master) && !ata_dev_present(slave));
2389
2390 if (shift == ATA_SHIFT_UDMA) {
2391 mask = ap->udma_mask;
2392 if (ata_dev_present(master)) {
2393 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2394 if (ata_dma_blacklisted(master)) {
2395 mask = 0;
2396 ata_pr_blacklisted(ap, master);
2397 }
2398 }
2399 if (ata_dev_present(slave)) {
2400 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2401 if (ata_dma_blacklisted(slave)) {
2402 mask = 0;
2403 ata_pr_blacklisted(ap, slave);
2404 }
2405 }
2406 }
2407 else if (shift == ATA_SHIFT_MWDMA) {
2408 mask = ap->mwdma_mask;
2409 if (ata_dev_present(master)) {
2410 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2411 if (ata_dma_blacklisted(master)) {
2412 mask = 0;
2413 ata_pr_blacklisted(ap, master);
2414 }
2415 }
2416 if (ata_dev_present(slave)) {
2417 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2418 if (ata_dma_blacklisted(slave)) {
2419 mask = 0;
2420 ata_pr_blacklisted(ap, slave);
2421 }
2422 }
2423 }
2424 else if (shift == ATA_SHIFT_PIO) {
2425 mask = ap->pio_mask;
2426 if (ata_dev_present(master)) {
2427 /* spec doesn't return explicit support for
2428 * PIO0-2, so we fake it
2429 */
2430 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2431 tmp_mode <<= 3;
2432 tmp_mode |= 0x7;
2433 mask &= tmp_mode;
2434 }
2435 if (ata_dev_present(slave)) {
2436 /* spec doesn't return explicit support for
2437 * PIO0-2, so we fake it
2438 */
2439 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2440 tmp_mode <<= 3;
2441 tmp_mode |= 0x7;
2442 mask &= tmp_mode;
2443 }
2444 }
2445 else {
2446 mask = 0xffffffff; /* shut up compiler warning */
2447 BUG();
2448 }
2449
2450 return mask;
2451 }
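
/*
 * Worked example of the PIO "fake it" math above, for a device whose
 * id[ATA_ID_PIO_MODES] low bits read 0x03 (PIO3 and PIO4 advertised):
 *
 *	tmp_mode = 0x03 << 3 = 0x18	(bits 3-4: PIO3, PIO4)
 *	tmp_mode |= 0x7      = 0x1f	(PIO0-2 assumed always supported)
 *
 * so the device contributes a candidate mask covering PIO modes 0-4.
 */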
2452
2453 /* find greatest bit */
2454 static int fgb(u32 bitmap)
2455 {
2456 unsigned int i;
2457 int x = -1;
2458
2459 for (i = 0; i < 32; i++)
2460 if (bitmap & (1 << i))
2461 x = i;
2462
2463 return x;
2464 }
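
/* e.g. fgb(0x16) == 4: bits 1, 2 and 4 are set and 4 is the greatest;
 * fgb(0) == -1 since no bit is set.
 */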
2465
2466 /**
2467 * ata_choose_xfer_mode - attempt to find best transfer mode
2468 * @ap: Port for which an xfer mode will be selected
2469 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
2470 * @xfer_shift_out: (output) bit shift that selects this mode
2471 *
2472 * Based on host and device capabilities, determine the
2473 * maximum transfer mode that is amenable to all.
2474 *
2475 * LOCKING:
2476 * PCI/etc. bus probe sem.
2477 *
2478 * RETURNS:
2479 * Zero on success, negative on error.
2480 */
2481
2482 static int ata_choose_xfer_mode(const struct ata_port *ap,
2483 u8 *xfer_mode_out,
2484 unsigned int *xfer_shift_out)
2485 {
2486 unsigned int mask, shift;
2487 int x, i;
2488
2489 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
2490 shift = xfer_mode_classes[i].shift;
2491 mask = ata_get_mode_mask(ap, shift);
2492
2493 x = fgb(mask);
2494 if (x >= 0) {
2495 *xfer_mode_out = xfer_mode_classes[i].base + x;
2496 *xfer_shift_out = shift;
2497 return 0;
2498 }
2499 }
2500
2501 return -1;
2502 }
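
/*
 * End-to-end sketch (values assumed for illustration): with
 * ap->udma_mask = 0x3f and a device advertising UDMA0-5
 * (id[ATA_ID_UDMA_MODES] & 0xff == 0x3f), ata_get_mode_mask() returns
 * 0x3f on the first (UDMA) pass, fgb(0x3f) == 5, and we hand back
 *
 *	*xfer_mode_out  = base + 5	(XFER_UDMA_5, i.e. UDMA/100)
 *	*xfer_shift_out = ATA_SHIFT_UDMA
 */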
2503
2504 /**
2505 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2506 * @ap: Port associated with device @dev
2507 * @dev: Device to which command will be sent
2508 *
2509 * Issue SET FEATURES - XFER MODE command to device @dev
2510 * on port @ap.
2511 *
2512 * LOCKING:
2513 * PCI/etc. bus probe sem.
2514 */
2515
2516 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2517 {
2518 struct ata_taskfile tf;
2519
2520 /* set up set-features taskfile */
2521 DPRINTK("set features - xfer mode\n");
2522
2523 ata_tf_init(ap, &tf, dev->devno);
2524 tf.command = ATA_CMD_SET_FEATURES;
2525 tf.feature = SETFEATURES_XFER;
2526 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2527 tf.protocol = ATA_PROT_NODATA;
2528 tf.nsect = dev->xfer_mode;
2529
2530 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2531 printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
2532 ap->id);
2533 ata_port_disable(ap);
2534 }
2535
2536 DPRINTK("EXIT\n");
2537 }
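
/*
 * The mode byte placed in tf.nsect follows the SET FEATURES transfer
 * mode encoding: 0x08 + n selects PIO flow-control mode n, 0x20 + n
 * MWDMA mode n, 0x40 + n UDMA mode n.  E.g. dev->xfer_mode == 0x45
 * requests UDMA/100, matching the XFER_* codes used throughout libata.
 */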
2538
2539 /**
2540 * ata_dev_init_params - Issue INIT DEV PARAMS command
2541 * @ap: Port associated with device @dev
2542 * @dev: Device to which command will be sent
2543 *
2544 * LOCKING:
2545 * Kernel thread context (may sleep)
2546 *
2547 * RETURNS:
2548 * 0 on success, AC_ERR_* mask otherwise.
2549 */
2550
2551 static unsigned int ata_dev_init_params(struct ata_port *ap,
2552 struct ata_device *dev)
2553 {
2554 struct ata_taskfile tf;
2555 unsigned int err_mask;
2556 u16 sectors = dev->id[6];
2557 u16 heads = dev->id[3];
2558
2559 /* Number of sectors per track 1-255. Number of heads 1-16 */
2560 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2561 return 0;
2562
2563 /* set up init dev params taskfile */
2564 DPRINTK("init dev params \n");
2565
2566 ata_tf_init(ap, &tf, dev->devno);
2567 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2568 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2569 tf.protocol = ATA_PROT_NODATA;
2570 tf.nsect = sectors;
2571 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2572
2573 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2574
2575 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2576 return err_mask;
2577 }
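
/*
 * Example with a typical CHS-era geometry: IDENTIFY word 3 = 16 heads
 * and word 6 = 63 sectors/track yield tf.nsect = 63 and
 * tf.device |= 0x0f ("max head" 15), per the INIT DEVICE PARAMETERS
 * register protocol.
 */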
2578
2579 /**
2580 * ata_sg_clean - Unmap DMA memory associated with command
2581 * @qc: Command containing DMA memory to be released
2582 *
2583 * Unmap all mapped DMA memory associated with this command.
2584 *
2585 * LOCKING:
2586 * spin_lock_irqsave(host_set lock)
2587 */
2588
2589 static void ata_sg_clean(struct ata_queued_cmd *qc)
2590 {
2591 struct ata_port *ap = qc->ap;
2592 struct scatterlist *sg = qc->__sg;
2593 int dir = qc->dma_dir;
2594 void *pad_buf = NULL;
2595
2596 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2597 WARN_ON(sg == NULL);
2598
2599 if (qc->flags & ATA_QCFLAG_SINGLE)
2600 WARN_ON(qc->n_elem > 1);
2601
2602 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2603
2604 /* if we padded the buffer out to 32-bit bound, and data
2605 * xfer direction is from-device, we must copy from the
2606 * pad buffer back into the supplied buffer
2607 */
2608 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2609 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2610
2611 if (qc->flags & ATA_QCFLAG_SG) {
2612 if (qc->n_elem)
2613 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2614 /* restore last sg */
2615 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2616 if (pad_buf) {
2617 struct scatterlist *psg = &qc->pad_sgent;
2618 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2619 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2620 kunmap_atomic(addr, KM_IRQ0);
2621 }
2622 } else {
2623 if (qc->n_elem)
2624 dma_unmap_single(ap->host_set->dev,
2625 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2626 dir);
2627 /* restore sg */
2628 sg->length += qc->pad_len;
2629 if (pad_buf)
2630 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2631 pad_buf, qc->pad_len);
2632 }
2633
2634 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2635 qc->__sg = NULL;
2636 }
2637
2638 /**
2639 * ata_fill_sg - Fill PCI IDE PRD table
2640 * @qc: Metadata associated with taskfile to be transferred
2641 *
2642 * Fill PCI IDE PRD (scatter-gather) table with segments
2643 * associated with the current disk command.
2644 *
2645 * LOCKING:
2646 * spin_lock_irqsave(host_set lock)
2647 *
2648 */
2649 static void ata_fill_sg(struct ata_queued_cmd *qc)
2650 {
2651 struct ata_port *ap = qc->ap;
2652 struct scatterlist *sg;
2653 unsigned int idx;
2654
2655 WARN_ON(qc->__sg == NULL);
2656 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2657
2658 idx = 0;
2659 ata_for_each_sg(sg, qc) {
2660 u32 addr, offset;
2661 u32 sg_len, len;
2662
2663 /* determine if physical DMA addr spans 64K boundary.
2664 * Note h/w doesn't support 64-bit, so we unconditionally
2665 * truncate dma_addr_t to u32.
2666 */
2667 addr = (u32) sg_dma_address(sg);
2668 sg_len = sg_dma_len(sg);
2669
2670 while (sg_len) {
2671 offset = addr & 0xffff;
2672 len = sg_len;
2673 if ((offset + sg_len) > 0x10000)
2674 len = 0x10000 - offset;
2675
2676 ap->prd[idx].addr = cpu_to_le32(addr);
2677 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2678 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2679
2680 idx++;
2681 sg_len -= len;
2682 addr += len;
2683 }
2684 }
2685
2686 if (idx)
2687 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2688 }
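
/*
 * 64K boundary-splitting example for the loop above: an S/G element
 * with DMA address 0x1fff0 and length 0x20 straddles a boundary and is
 * emitted as two PRD entries:
 *
 *	PRD[n]   = (0x0001fff0, 0x10)
 *	PRD[n+1] = (0x00020000, 0x10)
 */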
2689 /**
2690 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2691 * @qc: Metadata associated with taskfile to check
2692 *
2693 * Allow low-level driver to filter ATA PACKET commands, returning
2694 * a status indicating whether or not it is OK to use DMA for the
2695 * supplied PACKET command.
2696 *
2697 * LOCKING:
2698 * spin_lock_irqsave(host_set lock)
2699 *
2700 * RETURNS: 0 when ATAPI DMA can be used
2701 * nonzero otherwise
2702 */
2703 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2704 {
2705 struct ata_port *ap = qc->ap;
2706 int rc = 0; /* Assume ATAPI DMA is OK by default */
2707
2708 if (ap->ops->check_atapi_dma)
2709 rc = ap->ops->check_atapi_dma(qc);
2710
2711 return rc;
2712 }
2713 /**
2714 * ata_qc_prep - Prepare taskfile for submission
2715 * @qc: Metadata associated with taskfile to be prepared
2716 *
2717 * Prepare ATA taskfile for submission.
2718 *
2719 * LOCKING:
2720 * spin_lock_irqsave(host_set lock)
2721 */
2722 void ata_qc_prep(struct ata_queued_cmd *qc)
2723 {
2724 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2725 return;
2726
2727 ata_fill_sg(qc);
2728 }
2729
2730 /**
2731 * ata_sg_init_one - Associate command with memory buffer
2732 * @qc: Command to be associated
2733 * @buf: Memory buffer
2734 * @buflen: Length of memory buffer, in bytes.
2735 *
2736 * Initialize the data-related elements of queued_cmd @qc
2737 * to point to a single memory buffer, @buf of byte length @buflen.
2738 *
2739 * LOCKING:
2740 * spin_lock_irqsave(host_set lock)
2741 */
2742
2743 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2744 {
2745 struct scatterlist *sg;
2746
2747 qc->flags |= ATA_QCFLAG_SINGLE;
2748
2749 memset(&qc->sgent, 0, sizeof(qc->sgent));
2750 qc->__sg = &qc->sgent;
2751 qc->n_elem = 1;
2752 qc->orig_n_elem = 1;
2753 qc->buf_virt = buf;
2754
2755 sg = qc->__sg;
2756 sg_init_one(sg, buf, buflen);
2757 }
2758
2759 /**
2760 * ata_sg_init - Associate command with scatter-gather table.
2761 * @qc: Command to be associated
2762 * @sg: Scatter-gather table.
2763 * @n_elem: Number of elements in s/g table.
2764 *
2765 * Initialize the data-related elements of queued_cmd @qc
2766 * to point to a scatter-gather table @sg, containing @n_elem
2767 * elements.
2768 *
2769 * LOCKING:
2770 * spin_lock_irqsave(host_set lock)
2771 */
2772
2773 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2774 unsigned int n_elem)
2775 {
2776 qc->flags |= ATA_QCFLAG_SG;
2777 qc->__sg = sg;
2778 qc->n_elem = n_elem;
2779 qc->orig_n_elem = n_elem;
2780 }
2781
2782 /**
2783 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2784 * @qc: Command with memory buffer to be mapped.
2785 *
2786 * DMA-map the memory buffer associated with queued_cmd @qc.
2787 *
2788 * LOCKING:
2789 * spin_lock_irqsave(host_set lock)
2790 *
2791 * RETURNS:
2792 * Zero on success, negative on error.
2793 */
2794
2795 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2796 {
2797 struct ata_port *ap = qc->ap;
2798 int dir = qc->dma_dir;
2799 struct scatterlist *sg = qc->__sg;
2800 dma_addr_t dma_address;
2801 int trim_sg = 0;
2802
2803 /* we must lengthen transfers to end on a 32-bit boundary */
2804 qc->pad_len = sg->length & 3;
2805 if (qc->pad_len) {
2806 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2807 struct scatterlist *psg = &qc->pad_sgent;
2808
2809 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2810
2811 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2812
2813 if (qc->tf.flags & ATA_TFLAG_WRITE)
2814 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
2815 qc->pad_len);
2816
2817 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2818 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2819 /* trim sg */
2820 sg->length -= qc->pad_len;
2821 if (sg->length == 0)
2822 trim_sg = 1;
2823
2824 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
2825 sg->length, qc->pad_len);
2826 }
2827
2828 if (trim_sg) {
2829 qc->n_elem--;
2830 goto skip_map;
2831 }
2832
2833 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2834 sg->length, dir);
2835 if (dma_mapping_error(dma_address)) {
2836 /* restore sg */
2837 sg->length += qc->pad_len;
2838 return -1;
2839 }
2840
2841 sg_dma_address(sg) = dma_address;
2842 sg_dma_len(sg) = sg->length;
2843
2844 skip_map:
2845 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2846 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2847
2848 return 0;
2849 }
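
/*
 * Padding example for the logic above: a single 510-byte ATAPI buffer
 * gives pad_len = 510 & 3 = 2, so sg->length is trimmed to 508 and the
 * final 2 bytes travel via the 4-byte pad buffer (zero-filled; data is
 * copied in first for writes), making the DMA transfer end on a 32-bit
 * boundary.
 */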
2850
2851 /**
2852 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
2853 * @qc: Command with scatter-gather table to be mapped.
2854 *
2855 * DMA-map the scatter-gather table associated with queued_cmd @qc.
2856 *
2857 * LOCKING:
2858 * spin_lock_irqsave(host_set lock)
2859 *
2860 * RETURNS:
2861 * Zero on success, negative on error.
2862 *
2863 */
2864
2865 static int ata_sg_setup(struct ata_queued_cmd *qc)
2866 {
2867 struct ata_port *ap = qc->ap;
2868 struct scatterlist *sg = qc->__sg;
2869 struct scatterlist *lsg = &sg[qc->n_elem - 1];
2870 int n_elem, pre_n_elem, dir, trim_sg = 0;
2871
2872 VPRINTK("ENTER, ata%u\n", ap->id);
2873 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
2874
2875 /* we must lengthen transfers to end on a 32-bit boundary */
2876 qc->pad_len = lsg->length & 3;
2877 if (qc->pad_len) {
2878 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2879 struct scatterlist *psg = &qc->pad_sgent;
2880 unsigned int offset;
2881
2882 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2883
2884 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2885
2886 /*
2887 * psg->page/offset are used to copy to-be-written
2888 * data in this function or read data in ata_sg_clean.
2889 */
2890 offset = lsg->offset + lsg->length - qc->pad_len;
2891 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
2892 psg->offset = offset_in_page(offset);
2893
2894 if (qc->tf.flags & ATA_TFLAG_WRITE) {
2895 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2896 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
2897 kunmap_atomic(addr, KM_IRQ0);
2898 }
2899
2900 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2901 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2902 /* trim last sg */
2903 lsg->length -= qc->pad_len;
2904 if (lsg->length == 0)
2905 trim_sg = 1;
2906
2907 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
2908 qc->n_elem - 1, lsg->length, qc->pad_len);
2909 }
2910
2911 pre_n_elem = qc->n_elem;
2912 if (trim_sg && pre_n_elem)
2913 pre_n_elem--;
2914
2915 if (!pre_n_elem) {
2916 n_elem = 0;
2917 goto skip_map;
2918 }
2919
2920 dir = qc->dma_dir;
2921 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
2922 if (n_elem < 1) {
2923 /* restore last sg */
2924 lsg->length += qc->pad_len;
2925 return -1;
2926 }
2927
2928 DPRINTK("%d sg elements mapped\n", n_elem);
2929
2930 skip_map:
2931 qc->n_elem = n_elem;
2932
2933 return 0;
2934 }
2935
2936 /**
2937 * ata_poll_qc_complete - turn irq back on and finish qc
2938 * @qc: Command to complete
2940 *
2941 * LOCKING:
2942 * None. (grabs host lock)
2943 */
2944
2945 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
2946 {
2947 struct ata_port *ap = qc->ap;
2948 unsigned long flags;
2949
2950 spin_lock_irqsave(&ap->host_set->lock, flags);
2951 ap->flags &= ~ATA_FLAG_NOINTR;
2952 ata_irq_on(ap);
2953 ata_qc_complete(qc);
2954 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2955 }
2956
2957 /**
2958 * ata_pio_poll - poll using PIO, depending on current state
2959 * @ap: the target ata_port
2960 *
2961 * LOCKING:
2962 * None. (executing in kernel thread context)
2963 *
2964 * RETURNS:
2965 * timeout value to use
2966 */
2967
2968 static unsigned long ata_pio_poll(struct ata_port *ap)
2969 {
2970 struct ata_queued_cmd *qc;
2971 u8 status;
2972 unsigned int poll_state = HSM_ST_UNKNOWN;
2973 unsigned int reg_state = HSM_ST_UNKNOWN;
2974
2975 qc = ata_qc_from_tag(ap, ap->active_tag);
2976 WARN_ON(qc == NULL);
2977
2978 switch (ap->hsm_task_state) {
2979 case HSM_ST:
2980 case HSM_ST_POLL:
2981 poll_state = HSM_ST_POLL;
2982 reg_state = HSM_ST;
2983 break;
2984 case HSM_ST_LAST:
2985 case HSM_ST_LAST_POLL:
2986 poll_state = HSM_ST_LAST_POLL;
2987 reg_state = HSM_ST_LAST;
2988 break;
2989 default:
2990 BUG();
2991 break;
2992 }
2993
2994 status = ata_chk_status(ap);
2995 if (status & ATA_BUSY) {
2996 if (time_after(jiffies, ap->pio_task_timeout)) {
2997 qc->err_mask |= AC_ERR_TIMEOUT;
2998 ap->hsm_task_state = HSM_ST_TMOUT;
2999 return 0;
3000 }
3001 ap->hsm_task_state = poll_state;
3002 return ATA_SHORT_PAUSE;
3003 }
3004
3005 ap->hsm_task_state = reg_state;
3006 return 0;
3007 }
3008
3009 /**
3010 * ata_pio_complete - check if drive is busy or idle
3011 * @ap: the target ata_port
3012 *
3013 * LOCKING:
3014 * None. (executing in kernel thread context)
3015 *
3016 * RETURNS:
3017 * Non-zero if qc completed, zero otherwise.
3018 */
3019
3020 static int ata_pio_complete (struct ata_port *ap)
3021 {
3022 struct ata_queued_cmd *qc;
3023 u8 drv_stat;
3024
3025 /*
3026 * This is purely heuristic. This is a fast path. Sometimes when
3027 * we enter, BSY will be cleared in a chk-status or two. If not,
3028 * the drive is probably seeking or something. Snooze for a couple
3029 * msecs, then chk-status again. If still busy, fall back to
3030 * HSM_ST_POLL state.
3031 */
3032 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3033 if (drv_stat & ATA_BUSY) {
3034 msleep(2);
3035 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3036 if (drv_stat & ATA_BUSY) {
3037 ap->hsm_task_state = HSM_ST_LAST_POLL;
3038 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3039 return 0;
3040 }
3041 }
3042
3043 qc = ata_qc_from_tag(ap, ap->active_tag);
3044 WARN_ON(qc == NULL);
3045
3046 drv_stat = ata_wait_idle(ap);
3047 if (!ata_ok(drv_stat)) {
3048 qc->err_mask |= __ac_err_mask(drv_stat);
3049 ap->hsm_task_state = HSM_ST_ERR;
3050 return 0;
3051 }
3052
3053 ap->hsm_task_state = HSM_ST_IDLE;
3054
3055 WARN_ON(qc->err_mask);
3056 ata_poll_qc_complete(qc);
3057
3058 /* another command may start at this point */
3059
3060 return 1;
3061 }
3062
3063
3064 /**
3065 * swap_buf_le16 - swap halves of 16-bit words in place
3066 * @buf: Buffer to swap
3067 * @buf_words: Number of 16-bit words in buffer.
3068 *
3069 * Swap halves of 16-bit words if needed to convert from
3070 * little-endian byte order to native cpu byte order, or
3071 * vice-versa.
3072 *
3073 * LOCKING:
3074 * Inherited from caller.
3075 */
3076 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3077 {
3078 #ifdef __BIG_ENDIAN
3079 unsigned int i;
3080
3081 for (i = 0; i < buf_words; i++)
3082 buf[i] = le16_to_cpu(buf[i]);
3083 #endif /* __BIG_ENDIAN */
3084 }
3085
3086 /**
3087 * ata_mmio_data_xfer - Transfer data by MMIO
3088 * @ap: port to read/write
3089 * @buf: data buffer
3090 * @buflen: buffer length
3091 * @write_data: read/write
3092 *
3093 * Transfer data from/to the device data register by MMIO.
3094 *
3095 * LOCKING:
3096 * Inherited from caller.
3097 */
3098
3099 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3100 unsigned int buflen, int write_data)
3101 {
3102 unsigned int i;
3103 unsigned int words = buflen >> 1;
3104 u16 *buf16 = (u16 *) buf;
3105 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3106
3107 /* Transfer multiple of 2 bytes */
3108 if (write_data) {
3109 for (i = 0; i < words; i++)
3110 writew(le16_to_cpu(buf16[i]), mmio);
3111 } else {
3112 for (i = 0; i < words; i++)
3113 buf16[i] = cpu_to_le16(readw(mmio));
3114 }
3115
3116 /* Transfer trailing 1 byte, if any. */
3117 if (unlikely(buflen & 0x01)) {
3118 u16 align_buf[1] = { 0 };
3119 unsigned char *trailing_buf = buf + buflen - 1;
3120
3121 if (write_data) {
3122 memcpy(align_buf, trailing_buf, 1);
3123 writew(le16_to_cpu(align_buf[0]), mmio);
3124 } else {
3125 align_buf[0] = cpu_to_le16(readw(mmio));
3126 memcpy(trailing_buf, align_buf, 1);
3127 }
3128 }
3129 }
3130
3131 /**
3132 * ata_pio_data_xfer - Transfer data by PIO
3133 * @ap: port to read/write
3134 * @buf: data buffer
3135 * @buflen: buffer length
3136 * @write_data: read/write
3137 *
3138 * Transfer data from/to the device data register by PIO.
3139 *
3140 * LOCKING:
3141 * Inherited from caller.
3142 */
3143
3144 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3145 unsigned int buflen, int write_data)
3146 {
3147 unsigned int words = buflen >> 1;
3148
3149 /* Transfer multiple of 2 bytes */
3150 if (write_data)
3151 outsw(ap->ioaddr.data_addr, buf, words);
3152 else
3153 insw(ap->ioaddr.data_addr, buf, words);
3154
3155 /* Transfer trailing 1 byte, if any. */
3156 if (unlikely(buflen & 0x01)) {
3157 u16 align_buf[1] = { 0 };
3158 unsigned char *trailing_buf = buf + buflen - 1;
3159
3160 if (write_data) {
3161 memcpy(align_buf, trailing_buf, 1);
3162 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3163 } else {
3164 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3165 memcpy(trailing_buf, align_buf, 1);
3166 }
3167 }
3168 }
3169
3170 /**
3171 * ata_data_xfer - Transfer data from/to the data register.
3172 * @ap: port to read/write
3173 * @buf: data buffer
3174 * @buflen: buffer length
3175 * @do_write: read/write
3176 *
3177 * Transfer data from/to the device data register.
3178 *
3179 * LOCKING:
3180 * Inherited from caller.
3181 */
3182
3183 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3184 unsigned int buflen, int do_write)
3185 {
3186 /* Make the crap hardware pay the costs not the good stuff */
3187 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3188 unsigned long flags;
3189 local_irq_save(flags);
3190 if (ap->flags & ATA_FLAG_MMIO)
3191 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3192 else
3193 ata_pio_data_xfer(ap, buf, buflen, do_write);
3194 local_irq_restore(flags);
3195 } else {
3196 if (ap->flags & ATA_FLAG_MMIO)
3197 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3198 else
3199 ata_pio_data_xfer(ap, buf, buflen, do_write);
3200 }
3201 }
3202
3203 /**
3204 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3205 * @qc: Command on going
3206 *
3207 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3208 *
3209 * LOCKING:
3210 * Inherited from caller.
3211 */
3212
3213 static void ata_pio_sector(struct ata_queued_cmd *qc)
3214 {
3215 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3216 struct scatterlist *sg = qc->__sg;
3217 struct ata_port *ap = qc->ap;
3218 struct page *page;
3219 unsigned int offset;
3220 unsigned char *buf;
3221
3222 if (qc->cursect == (qc->nsect - 1))
3223 ap->hsm_task_state = HSM_ST_LAST;
3224
3225 page = sg[qc->cursg].page;
3226 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3227
3228 /* get the current page and offset */
3229 page = nth_page(page, (offset >> PAGE_SHIFT));
3230 offset %= PAGE_SIZE;
3231
3232 buf = kmap(page) + offset;
3233
3234 qc->cursect++;
3235 qc->cursg_ofs++;
3236
3237 if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg[qc->cursg].length) {
3238 qc->cursg++;
3239 qc->cursg_ofs = 0;
3240 }
3241
3242 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3243
3244 /* do the actual data transfer */
3245 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3246 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3247
3248 kunmap(page);
3249 }
3250
3251 /**
3252 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3253 * @qc: Command on going
3254 * @bytes: number of bytes
3255 *
3256 * Transfer data from/to the ATAPI device.
3257 *
3258 * LOCKING:
3259 * Inherited from caller.
3260 *
3261 */
3262
3263 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3264 {
3265 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3266 struct scatterlist *sg = qc->__sg;
3267 struct ata_port *ap = qc->ap;
3268 struct page *page;
3269 unsigned char *buf;
3270 unsigned int offset, count;
3271
3272 if (qc->curbytes + bytes >= qc->nbytes)
3273 ap->hsm_task_state = HSM_ST_LAST;
3274
3275 next_sg:
3276 if (unlikely(qc->cursg >= qc->n_elem)) {
3277 /*
3278 * The end of qc->sg is reached and the device expects
3279 * more data to transfer. In order not to overrun qc->sg
3280 * and fulfill length specified in the byte count register,
3281 * - for read case, discard trailing data from the device
3282 * - for write case, padding zero data to the device
3283 */
3284 u16 pad_buf[1] = { 0 };
3285 unsigned int words = bytes >> 1;
3286 unsigned int i;
3287
3288 if (words) /* warning if bytes > 1 */
3289 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3290 ap->id, bytes);
3291
3292 for (i = 0; i < words; i++)
3293 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3294
3295 ap->hsm_task_state = HSM_ST_LAST;
3296 return;
3297 }
3298
3299 sg = &qc->__sg[qc->cursg];
3300
3301 page = sg->page;
3302 offset = sg->offset + qc->cursg_ofs;
3303
3304 /* get the current page and offset */
3305 page = nth_page(page, (offset >> PAGE_SHIFT));
3306 offset %= PAGE_SIZE;
3307
3308 /* don't overrun current sg */
3309 count = min(sg->length - qc->cursg_ofs, bytes);
3310
3311 /* don't cross page boundaries */
3312 count = min(count, (unsigned int)PAGE_SIZE - offset);
3313
3314 buf = kmap(page) + offset;
3315
3316 bytes -= count;
3317 qc->curbytes += count;
3318 qc->cursg_ofs += count;
3319
3320 if (qc->cursg_ofs == sg->length) {
3321 qc->cursg++;
3322 qc->cursg_ofs = 0;
3323 }
3324
3325 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3326
3327 /* do the actual data transfer */
3328 ata_data_xfer(ap, buf, count, do_write);
3329
3330 kunmap(page);
3331
3332 if (bytes)
3333 goto next_sg;
3334 }
3335
3336 /**
3337 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3338 * @qc: Command on going
3339 *
3340 * Transfer data from/to the ATAPI device.
3341 *
3342 * LOCKING:
3343 * Inherited from caller.
3344 */
3345
3346 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3347 {
3348 struct ata_port *ap = qc->ap;
3349 struct ata_device *dev = qc->dev;
3350 unsigned int ireason, bc_lo, bc_hi, bytes;
3351 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3352
3353 ap->ops->tf_read(ap, &qc->tf);
3354 ireason = qc->tf.nsect;
3355 bc_lo = qc->tf.lbam;
3356 bc_hi = qc->tf.lbah;
3357 bytes = (bc_hi << 8) | bc_lo;
3358
3359 /* shall be cleared to zero, indicating xfer of data */
3360 if (ireason & (1 << 0))
3361 goto err_out;
3362
3363 /* make sure transfer direction matches expected */
3364 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3365 if (do_write != i_write)
3366 goto err_out;
3367
3368 __atapi_pio_bytes(qc, bytes);
3369
3370 return;
3371
3372 err_out:
3373 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3374 ap->id, dev->devno);
3375 qc->err_mask |= AC_ERR_HSM;
3376 ap->hsm_task_state = HSM_ST_ERR;
3377 }
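
/*
 * Register decode example for the checks above: ireason (read from the
 * nsect register) carries CoD in bit 0 and IO in bit 1, while lbam/lbah
 * form the byte count.  E.g. ireason = 0x00, lbam = 0x00, lbah = 0x02
 * describes a 512-byte data-out phase: CoD = 0 (data, not command) and
 * IO = 0 (host-to-device, matching a write taskfile).
 */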
3378
3379 /**
3380 * ata_pio_block - start PIO on a block
3381 * @ap: the target ata_port
3382 *
3383 * LOCKING:
3384 * None. (executing in kernel thread context)
3385 */
3386
3387 static void ata_pio_block(struct ata_port *ap)
3388 {
3389 struct ata_queued_cmd *qc;
3390 u8 status;
3391
3392 /*
3393 * This is purely heuristic. This is a fast path.
3394 * Sometimes when we enter, BSY will be cleared in
3395 * a chk-status or two. If not, the drive is probably seeking
3396 * or something. Snooze for a couple msecs, then
3397 * chk-status again. If still busy, fall back to
3398 * HSM_ST_POLL state.
3399 */
3400 status = ata_busy_wait(ap, ATA_BUSY, 5);
3401 if (status & ATA_BUSY) {
3402 msleep(2);
3403 status = ata_busy_wait(ap, ATA_BUSY, 10);
3404 if (status & ATA_BUSY) {
3405 ap->hsm_task_state = HSM_ST_POLL;
3406 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3407 return;
3408 }
3409 }
3410
3411 qc = ata_qc_from_tag(ap, ap->active_tag);
3412 WARN_ON(qc == NULL);
3413
3414 /* check error */
3415 if (status & (ATA_ERR | ATA_DF)) {
3416 qc->err_mask |= AC_ERR_DEV;
3417 ap->hsm_task_state = HSM_ST_ERR;
3418 return;
3419 }
3420
3421 /* transfer data if any */
3422 if (is_atapi_taskfile(&qc->tf)) {
3423 /* DRQ=0 means no more data to transfer */
3424 if ((status & ATA_DRQ) == 0) {
3425 ap->hsm_task_state = HSM_ST_LAST;
3426 return;
3427 }
3428
3429 atapi_pio_bytes(qc);
3430 } else {
3431 /* handle BSY=0, DRQ=0 as error */
3432 if ((status & ATA_DRQ) == 0) {
3433 qc->err_mask |= AC_ERR_HSM;
3434 ap->hsm_task_state = HSM_ST_ERR;
3435 return;
3436 }
3437
3438 ata_pio_sector(qc);
3439 }
3440 }
3441
3442 static void ata_pio_error(struct ata_port *ap)
3443 {
3444 struct ata_queued_cmd *qc;
3445
3446 qc = ata_qc_from_tag(ap, ap->active_tag);
3447 WARN_ON(qc == NULL);
3448
3449 if (qc->tf.command != ATA_CMD_PACKET)
3450 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3451
3452 /* make sure qc->err_mask is available to
3453 * know what's wrong and recover
3454 */
3455 WARN_ON(qc->err_mask == 0);
3456
3457 ap->hsm_task_state = HSM_ST_IDLE;
3458
3459 ata_poll_qc_complete(qc);
3460 }
3461
3462 static void ata_pio_task(void *_data)
3463 {
3464 struct ata_port *ap = _data;
3465 unsigned long timeout;
3466 int qc_completed;
3467
3468 fsm_start:
3469 timeout = 0;
3470 qc_completed = 0;
3471
3472 switch (ap->hsm_task_state) {
3473 case HSM_ST_IDLE:
3474 return;
3475
3476 case HSM_ST:
3477 ata_pio_block(ap);
3478 break;
3479
3480 case HSM_ST_LAST:
3481 qc_completed = ata_pio_complete(ap);
3482 break;
3483
3484 case HSM_ST_POLL:
3485 case HSM_ST_LAST_POLL:
3486 timeout = ata_pio_poll(ap);
3487 break;
3488
3489 case HSM_ST_TMOUT:
3490 case HSM_ST_ERR:
3491 ata_pio_error(ap);
3492 return;
3493 }
3494
3495 if (timeout)
3496 ata_queue_delayed_pio_task(ap, timeout);
3497 else if (!qc_completed)
3498 goto fsm_start;
3499 }
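
/*
 * Rough map of the polling HSM driven above, as implemented by the
 * handlers in this file:
 *
 *	HSM_ST         data block transfer; BSY stuck -> HSM_ST_POLL
 *	HSM_ST_LAST    wait for completion; still busy -> HSM_ST_LAST_POLL
 *	HSM_ST_*POLL   timed status polls; expiry -> HSM_ST_TMOUT
 *	HSM_ST_TMOUT,
 *	HSM_ST_ERR     ata_pio_error(): complete qc with err_mask set
 *	HSM_ST_IDLE    nothing queued; the task simply returns
 */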
3500
3501 /**
3502 * ata_qc_timeout - Handle timeout of queued command
3503 * @qc: Command that timed out
3504 *
3505 * Some part of the kernel (currently, only the SCSI layer)
3506 * has noticed that the active command on the port has not
3507 * completed after a specified length of time. Handle this
3508 * condition by disabling DMA (if necessary) and completing
3509 * transactions, with error if necessary.
3510 *
3511 * This also handles the case of the "lost interrupt", where
3512 * for some reason (possibly hardware bug, possibly driver bug)
3513 * an interrupt was not delivered to the driver, even though the
3514 * transaction completed successfully.
3515 *
3516 * LOCKING:
3517 * Inherited from SCSI layer (none, can sleep)
3518 */
3519
3520 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3521 {
3522 struct ata_port *ap = qc->ap;
3523 struct ata_host_set *host_set = ap->host_set;
3524 u8 host_stat = 0, drv_stat;
3525 unsigned long flags;
3526
3527 DPRINTK("ENTER\n");
3528
3529 ata_flush_pio_tasks(ap);
3530 ap->hsm_task_state = HSM_ST_IDLE;
3531
3532 spin_lock_irqsave(&host_set->lock, flags);
3533
3534 switch (qc->tf.protocol) {
3535
3536 case ATA_PROT_DMA:
3537 case ATA_PROT_ATAPI_DMA:
3538 host_stat = ap->ops->bmdma_status(ap);
3539
3540 /* before we do anything else, clear DMA-Start bit */
3541 ap->ops->bmdma_stop(qc);
3542
3543 /* fall through */
3544
3545 default:
3546 ata_altstatus(ap);
3547 drv_stat = ata_chk_status(ap);
3548
3549 /* ack bmdma irq events */
3550 ap->ops->irq_clear(ap);
3551
3552 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3553 ap->id, qc->tf.command, drv_stat, host_stat);
3554
3555 /* complete taskfile transaction */
3556 qc->err_mask |= ac_err_mask(drv_stat);
3557 break;
3558 }
3559
3560 spin_unlock_irqrestore(&host_set->lock, flags);
3561
3562 ata_eh_qc_complete(qc);
3563
3564 DPRINTK("EXIT\n");
3565 }
3566
3567 /**
3568 * ata_eng_timeout - Handle timeout of queued command
3569 * @ap: Port on which timed-out command is active
3570 *
3571 * Some part of the kernel (currently, only the SCSI layer)
3572 * has noticed that the active command on port @ap has not
3573 * completed after a specified length of time. Handle this
3574 * condition by disabling DMA (if necessary) and completing
3575 * transactions, with error if necessary.
3576 *
3577 * This also handles the case of the "lost interrupt", where
3578 * for some reason (possibly hardware bug, possibly driver bug)
3579 * an interrupt was not delivered to the driver, even though the
3580 * transaction completed successfully.
3581 *
3582 * LOCKING:
3583 * Inherited from SCSI layer (none, can sleep)
3584 */
3585
3586 void ata_eng_timeout(struct ata_port *ap)
3587 {
3588 DPRINTK("ENTER\n");
3589
3590 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3591
3592 DPRINTK("EXIT\n");
3593 }
3594
3595 /**
3596 * ata_qc_new - Request an available ATA command, for queueing
3597 * @ap: Port from which to request an available command structure
3599 *
3600 * LOCKING:
3601 * None.
3602 */
3603
3604 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3605 {
3606 struct ata_queued_cmd *qc = NULL;
3607 unsigned int i;
3608
3609 for (i = 0; i < ATA_MAX_QUEUE; i++)
3610 if (!test_and_set_bit(i, &ap->qactive)) {
3611 qc = ata_qc_from_tag(ap, i);
3612 break;
3613 }
3614
3615 if (qc)
3616 qc->tag = i;
3617
3618 return qc;
3619 }
3620
3621 /**
3622 * ata_qc_new_init - Request an available ATA command, and initialize it
3623 * @ap: Port associated with device @dev
3624 * @dev: Device from which we request an available command structure
3625 *
3626 * LOCKING:
3627 * None.
3628 */
3629
3630 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3631 struct ata_device *dev)
3632 {
3633 struct ata_queued_cmd *qc;
3634
3635 qc = ata_qc_new(ap);
3636 if (qc) {
3637 qc->scsicmd = NULL;
3638 qc->ap = ap;
3639 qc->dev = dev;
3640
3641 ata_qc_reinit(qc);
3642 }
3643
3644 return qc;
3645 }
3646
3647 /**
3648 * ata_qc_free - free unused ata_queued_cmd
3649 * @qc: Command to complete
3650 *
3651 * Designed to free unused ata_queued_cmd object
3652 * in case something prevents using it.
3653 *
3654 * LOCKING:
3655 * spin_lock_irqsave(host_set lock)
3656 */
3657 void ata_qc_free(struct ata_queued_cmd *qc)
3658 {
3659 struct ata_port *ap = qc->ap;
3660 unsigned int tag;
3661
3662 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3663
3664 qc->flags = 0;
3665 tag = qc->tag;
3666 if (likely(ata_tag_valid(tag))) {
3667 if (tag == ap->active_tag)
3668 ap->active_tag = ATA_TAG_POISON;
3669 qc->tag = ATA_TAG_POISON;
3670 clear_bit(tag, &ap->qactive);
3671 }
3672 }
3673
3674 void __ata_qc_complete(struct ata_queued_cmd *qc)
3675 {
3676 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3677 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3678
3679 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3680 ata_sg_clean(qc);
3681
3682 /* atapi: mark qc as inactive to prevent the interrupt handler
3683 * from completing the command twice later, before the error handler
3684 * is called. (when rc != 0 and atapi request sense is needed)
3685 */
3686 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3687
3688 /* call completion callback */
3689 qc->complete_fn(qc);
3690 }
3691
3692 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3693 {
3694 struct ata_port *ap = qc->ap;
3695
3696 switch (qc->tf.protocol) {
3697 case ATA_PROT_DMA:
3698 case ATA_PROT_ATAPI_DMA:
3699 return 1;
3700
3701 case ATA_PROT_ATAPI:
3702 case ATA_PROT_PIO:
3703 case ATA_PROT_PIO_MULT:
3704 if (ap->flags & ATA_FLAG_PIO_DMA)
3705 return 1;
3706
3707 /* fall through */
3708
3709 default:
3710 return 0;
3711 }
3712
3713 /* never reached */
3714 }
3715
3716 /**
3717 * ata_qc_issue - issue taskfile to device
3718 * @qc: command to issue to device
3719 *
3720 * Prepare an ATA command for submission to the device.
3721 * This includes mapping the data into a DMA-able
3722 * area, filling in the S/G table, and finally
3723 * writing the taskfile to hardware, starting the command.
3724 *
3725 * LOCKING:
3726 * spin_lock_irqsave(host_set lock)
3727 *
3728 * RETURNS:
3729 * Zero on success, AC_ERR_* mask on failure
3730 */
3731
3732 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3733 {
3734 struct ata_port *ap = qc->ap;
3735
3736 if (ata_should_dma_map(qc)) {
3737 if (qc->flags & ATA_QCFLAG_SG) {
3738 if (ata_sg_setup(qc))
3739 goto sg_err;
3740 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3741 if (ata_sg_setup_one(qc))
3742 goto sg_err;
3743 }
3744 } else {
3745 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3746 }
3747
3748 ap->ops->qc_prep(qc);
3749
3750 qc->ap->active_tag = qc->tag;
3751 qc->flags |= ATA_QCFLAG_ACTIVE;
3752
3753 return ap->ops->qc_issue(qc);
3754
3755 sg_err:
3756 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3757 return AC_ERR_SYSTEM;
3758 }
3759
3760
3761 /**
3762 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
3763 * @qc: command to issue to device
3764 *
3765 * Using various libata functions and hooks, this function
3766 * starts an ATA command. ATA commands are grouped into
3767 * classes called "protocols", and issuing each type of protocol
3768 * is slightly different.
3769 *
3770 * May be used as the qc_issue() entry in ata_port_operations.
3771 *
3772 * LOCKING:
3773 * spin_lock_irqsave(host_set lock)
3774 *
3775 * RETURNS:
3776 * Zero on success, AC_ERR_* mask on failure
3777 */
3778
3779 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3780 {
3781 struct ata_port *ap = qc->ap;
3782
3783 ata_dev_select(ap, qc->dev->devno, 1, 0);
3784
3785 switch (qc->tf.protocol) {
3786 case ATA_PROT_NODATA:
3787 ata_tf_to_host(ap, &qc->tf);
3788 break;
3789
3790 case ATA_PROT_DMA:
3791 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3792 ap->ops->bmdma_setup(qc); /* set up bmdma */
3793 ap->ops->bmdma_start(qc); /* initiate bmdma */
3794 break;
3795
3796 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3797 ata_qc_set_polling(qc);
3798 ata_tf_to_host(ap, &qc->tf);
3799 ap->hsm_task_state = HSM_ST;
3800 ata_queue_pio_task(ap);
3801 break;
3802
3803 case ATA_PROT_ATAPI:
3804 ata_qc_set_polling(qc);
3805 ata_tf_to_host(ap, &qc->tf);
3806 ata_queue_packet_task(ap);
3807 break;
3808
3809 case ATA_PROT_ATAPI_NODATA:
3810 ap->flags |= ATA_FLAG_NOINTR;
3811 ata_tf_to_host(ap, &qc->tf);
3812 ata_queue_packet_task(ap);
3813 break;
3814
3815 case ATA_PROT_ATAPI_DMA:
3816 ap->flags |= ATA_FLAG_NOINTR;
3817 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3818 ap->ops->bmdma_setup(qc); /* set up bmdma */
3819 ata_queue_packet_task(ap);
3820 break;
3821
3822 default:
3823 WARN_ON(1);
3824 return AC_ERR_SYSTEM;
3825 }
3826
3827 return 0;
3828 }
3829
3830 /**
3831 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
3832 * @qc: Info associated with this ATA transaction.
3833 *
3834 * LOCKING:
3835 * spin_lock_irqsave(host_set lock)
3836 */
3837
3838 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3839 {
3840 struct ata_port *ap = qc->ap;
3841 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3842 u8 dmactl;
3843 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3844
3845 /* load PRD table addr. */
3846 mb(); /* make sure PRD table writes are visible to controller */
3847 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3848
3849 /* specify data direction, triple-check start bit is clear */
3850 dmactl = readb(mmio + ATA_DMA_CMD);
3851 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3852 if (!rw)
3853 dmactl |= ATA_DMA_WR;
3854 writeb(dmactl, mmio + ATA_DMA_CMD);
3855
3856 /* issue r/w command */
3857 ap->ops->exec_command(ap, &qc->tf);
3858 }
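
/*
 * Note on the direction bit above: in the SFF-8038i bus master command
 * register, ATA_DMA_WR set means the bus master writes to system
 * memory, i.e. the device is being read -- hence the inverted test
 * "if (!rw) dmactl |= ATA_DMA_WR".  The port-I/O variant below applies
 * the same rule.
 */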
3859
3860 /**
3861 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
3862 * @qc: Info associated with this ATA transaction.
3863 *
3864 * LOCKING:
3865 * spin_lock_irqsave(host_set lock)
3866 */
3867
3868 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3869 {
3870 struct ata_port *ap = qc->ap;
3871 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3872 u8 dmactl;
3873
3874 /* start host DMA transaction */
3875 dmactl = readb(mmio + ATA_DMA_CMD);
3876 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3877
3878 /* Strictly, one may wish to issue a readb() here, to
3879 * flush the mmio write. However, control also passes
3880 * to the hardware at this point, and it will interrupt
3881 * us when we are to resume control. So, in effect,
3882 * we don't care when the mmio write flushes.
3883 * Further, a read of the DMA status register _immediately_
3884 * following the write may not be what certain flaky hardware
3885 * is expecting, so I think it is best not to add a readb()
3886 * without first testing all the MMIO ATA cards/mobos.
3887 * Or maybe I'm just being paranoid.
3888 */
3889 }
3890
3891 /**
3892 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3893 * @qc: Info associated with this ATA transaction.
3894 *
3895 * LOCKING:
3896 * spin_lock_irqsave(host_set lock)
3897 */
3898
3899 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
3900 {
3901 struct ata_port *ap = qc->ap;
3902 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3903 u8 dmactl;
3904
3905 /* load PRD table addr. */
3906 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3907
3908 /* specify data direction, triple-check start bit is clear */
3909 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3910 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3911 if (!rw)
3912 dmactl |= ATA_DMA_WR;
3913 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3914
3915 /* issue r/w command */
3916 ap->ops->exec_command(ap, &qc->tf);
3917 }
3918
3919 /**
3920 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3921 * @qc: Info associated with this ATA transaction.
3922 *
3923 * LOCKING:
3924 * spin_lock_irqsave(host_set lock)
3925 */
3926
3927 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
3928 {
3929 struct ata_port *ap = qc->ap;
3930 u8 dmactl;
3931
3932 /* start host DMA transaction */
3933 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3934 outb(dmactl | ATA_DMA_START,
3935 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3936 }
3937
3938
3939 /**
3940 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3941 * @qc: Info associated with this ATA transaction.
3942 *
3943 * Writes the ATA_DMA_START flag to the DMA command register.
3944 *
3945 * May be used as the bmdma_start() entry in ata_port_operations.
3946 *
3947 * LOCKING:
3948 * spin_lock_irqsave(host_set lock)
3949 */
3950 void ata_bmdma_start(struct ata_queued_cmd *qc)
3951 {
3952 if (qc->ap->flags & ATA_FLAG_MMIO)
3953 ata_bmdma_start_mmio(qc);
3954 else
3955 ata_bmdma_start_pio(qc);
3956 }
3957
3958
3959 /**
3960 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
3961 * @qc: Info associated with this ATA transaction.
3962 *
3963 * Writes address of PRD table to device's PRD Table Address
3964 * register, sets the DMA control register, and calls
3965 * ops->exec_command() to start the transfer.
3966 *
3967 * May be used as the bmdma_setup() entry in ata_port_operations.
3968 *
3969 * LOCKING:
3970 * spin_lock_irqsave(host_set lock)
3971 */
3972 void ata_bmdma_setup(struct ata_queued_cmd *qc)
3973 {
3974 if (qc->ap->flags & ATA_FLAG_MMIO)
3975 ata_bmdma_setup_mmio(qc);
3976 else
3977 ata_bmdma_setup_pio(qc);
3978 }
3979
3980
3981 /**
3982 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
3983 * @ap: Port associated with this ATA transaction.
3984 *
3985 * Clear interrupt and error flags in DMA status register.
3986 *
3987 * May be used as the irq_clear() entry in ata_port_operations.
3988 *
3989 * LOCKING:
3990 * spin_lock_irqsave(host_set lock)
3991 */
3992
3993 void ata_bmdma_irq_clear(struct ata_port *ap)
3994 {
3995 if (ap->flags & ATA_FLAG_MMIO) {
3996 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
3997 writeb(readb(mmio), mmio);
3998 } else {
3999 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
4000 outb(inb(addr), addr);
4001 }
4002
4003 }
4004
4005
4006 /**
4007 * ata_bmdma_status - Read PCI IDE BMDMA status
4008 * @ap: Port associated with this ATA transaction.
4009 *
4010 * Read and return BMDMA status register.
4011 *
4012 * May be used as the bmdma_status() entry in ata_port_operations.
4013 *
4014 * LOCKING:
4015 * spin_lock_irqsave(host_set lock)
4016 */
4017
4018 u8 ata_bmdma_status(struct ata_port *ap)
4019 {
4020 u8 host_stat;
4021 if (ap->flags & ATA_FLAG_MMIO) {
4022 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4023 host_stat = readb(mmio + ATA_DMA_STATUS);
4024 } else
4025 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
4026 return host_stat;
4027 }
4028
4029
4030 /**
4031 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
4032 * @qc: Command we are ending DMA for
4033 *
4034 * Clears the ATA_DMA_START flag in the dma control register
4035 *
4036 * May be used as the bmdma_stop() entry in ata_port_operations.
4037 *
4038 * LOCKING:
4039 * spin_lock_irqsave(host_set lock)
4040 */
4041
4042 void ata_bmdma_stop(struct ata_queued_cmd *qc)
4043 {
4044 struct ata_port *ap = qc->ap;
4045 if (ap->flags & ATA_FLAG_MMIO) {
4046 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4047
4048 /* clear start/stop bit */
4049 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
4050 mmio + ATA_DMA_CMD);
4051 } else {
4052 /* clear start/stop bit */
4053 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
4054 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4055 }
4056
4057 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
4058 ata_altstatus(ap); /* dummy read */
4059 }
4060
4061 /**
4062 * ata_host_intr - Handle host interrupt for given (port, task)
4063 * @ap: Port on which interrupt arrived (possibly...)
4064 * @qc: Taskfile currently active in engine
4065 *
4066 * Handle host interrupt for given queued command. Currently,
4067 * only DMA interrupts are handled. All other commands are
4068 * handled via polling with interrupts disabled (nIEN bit).
4069 *
4070 * LOCKING:
4071 * spin_lock_irqsave(host_set lock)
4072 *
4073 * RETURNS:
4074 * One if interrupt was handled, zero if not (shared irq).
4075 */
4076
4077 inline unsigned int ata_host_intr (struct ata_port *ap,
4078 struct ata_queued_cmd *qc)
4079 {
4080 u8 status, host_stat;
4081
4082 switch (qc->tf.protocol) {
4083
4084 case ATA_PROT_DMA:
4085 case ATA_PROT_ATAPI_DMA:
4086 case ATA_PROT_ATAPI:
4087 /* check status of DMA engine */
4088 host_stat = ap->ops->bmdma_status(ap);
4089 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4090
4091 /* if it's not our irq... */
4092 if (!(host_stat & ATA_DMA_INTR))
4093 goto idle_irq;
4094
4095 /* before we do anything else, clear DMA-Start bit */
4096 ap->ops->bmdma_stop(qc);
4097
4098 /* fall through */
4099
4100 case ATA_PROT_ATAPI_NODATA:
4101 case ATA_PROT_NODATA:
4102 /* check altstatus */
4103 status = ata_altstatus(ap);
4104 if (status & ATA_BUSY)
4105 goto idle_irq;
4106
4107 /* check main status, clearing INTRQ */
4108 status = ata_chk_status(ap);
4109 if (unlikely(status & ATA_BUSY))
4110 goto idle_irq;
4111 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4112 ap->id, qc->tf.protocol, status);
4113
4114 /* ack bmdma irq events */
4115 ap->ops->irq_clear(ap);
4116
4117 /* complete taskfile transaction */
4118 qc->err_mask |= ac_err_mask(status);
4119 ata_qc_complete(qc);
4120 break;
4121
4122 default:
4123 goto idle_irq;
4124 }
4125
4126 return 1; /* irq handled */
4127
4128 idle_irq:
4129 ap->stats.idle_irq++;
4130
4131 #ifdef ATA_IRQ_TRAP
4132 if ((ap->stats.idle_irq % 1000) == 0) {
4133 ata_irq_ack(ap, 0); /* debug trap */
4134 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4135 return 1;
4136 }
4137 #endif
4138 return 0; /* irq not handled */
4139 }
4140
4141 /**
4142 * ata_interrupt - Default ATA host interrupt handler
4143 * @irq: irq line (unused)
4144 * @dev_instance: pointer to our ata_host_set information structure
4145 * @regs: unused
4146 *
4147 * Default interrupt handler for PCI IDE devices. Calls
4148 * ata_host_intr() for each port that is not disabled.
4149 *
4150 * LOCKING:
4151 * Obtains host_set lock during operation.
4152 *
4153 * RETURNS:
4154 * IRQ_NONE or IRQ_HANDLED.
4155 */
4156
4157 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4158 {
4159 struct ata_host_set *host_set = dev_instance;
4160 unsigned int i;
4161 unsigned int handled = 0;
4162 unsigned long flags;
4163
4164 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4165 spin_lock_irqsave(&host_set->lock, flags);
4166
4167 for (i = 0; i < host_set->n_ports; i++) {
4168 struct ata_port *ap;
4169
4170 ap = host_set->ports[i];
4171 if (ap &&
4172 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4173 struct ata_queued_cmd *qc;
4174
4175 qc = ata_qc_from_tag(ap, ap->active_tag);
4176 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4177 (qc->flags & ATA_QCFLAG_ACTIVE))
4178 handled |= ata_host_intr(ap, qc);
4179 }
4180 }
4181
4182 spin_unlock_irqrestore(&host_set->lock, flags);
4183
4184 return IRQ_RETVAL(handled);
4185 }
4186
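/*
 * Illustrative sketch (not part of libata): controllers whose interrupt
 * status cannot be derived from the BMDMA status register alone typically
 * provide their own irq_handler instead of ata_interrupt(), but still hand
 * each active command to ata_host_intr().  The FOO_IRQ_STAT register and
 * the foo_port_pending() helper below are hypothetical.
 */
#if 0
static irqreturn_t foo_interrupt(int irq, void *dev_instance,
				 struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i, handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host_set->lock, flags);

	/* hypothetical controller-wide interrupt status register */
	irq_stat = readl(host_set->mmio_base + FOO_IRQ_STAT);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];
		struct ata_queued_cmd *qc;

		if (!foo_port_pending(irq_stat, i))
			continue;
		if (!ap || (ap->flags & (ATA_FLAG_PORT_DISABLED |
					 ATA_FLAG_NOINTR)))
			continue;

		qc = ata_qc_from_tag(ap, ap->active_tag);
		if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
		    (qc->flags & ATA_QCFLAG_ACTIVE))
			handled |= ata_host_intr(ap, qc);
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}
#endif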
4187 /**
4188 * atapi_packet_task - Write CDB bytes to hardware
4189 * @_data: Port to which ATAPI device is attached.
4190 *
4191 * When the device has indicated its readiness to accept
4192 * a CDB, this function is called. Send the CDB.
4193 * If DMA is to be performed, exit immediately.
4194 * Otherwise, we are in polling mode, so poll
4195 * status until the operation succeeds or fails.
4196 *
4197 * LOCKING:
4198 * Kernel thread context (may sleep)
4199 */
4200
4201 static void atapi_packet_task(void *_data)
4202 {
4203 struct ata_port *ap = _data;
4204 struct ata_queued_cmd *qc;
4205 u8 status;
4206
4207 qc = ata_qc_from_tag(ap, ap->active_tag);
4208 WARN_ON(qc == NULL);
4209 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4210
4211 /* sleep-wait for BSY to clear */
4212 DPRINTK("busy wait\n");
4213 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
4214 qc->err_mask |= AC_ERR_TIMEOUT;
4215 goto err_out;
4216 }
4217
4218 /* make sure DRQ is set */
4219 status = ata_chk_status(ap);
4220 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
4221 qc->err_mask |= AC_ERR_HSM;
4222 goto err_out;
4223 }
4224
4225 /* send SCSI cdb */
4226 DPRINTK("send cdb\n");
4227 WARN_ON(qc->dev->cdb_len < 12);
4228
4229 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
4230 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
4231 unsigned long flags;
4232
4233 /* Once we're done issuing command and kicking bmdma,
4234 * irq handler takes over. To not lose irq, we need
4235 * to clear NOINTR flag before sending cdb, but
4236 * interrupt handler shouldn't be invoked before we're
4237 * finished. Hence, the following locking.
4238 */
4239 spin_lock_irqsave(&ap->host_set->lock, flags);
4240 ap->flags &= ~ATA_FLAG_NOINTR;
4241 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4242 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
4243 ap->ops->bmdma_start(qc); /* initiate bmdma */
4244 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4245 } else {
4246 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4247
4248 /* PIO commands are handled by polling */
4249 ap->hsm_task_state = HSM_ST;
4250 ata_queue_pio_task(ap);
4251 }
4252
4253 return;
4254
4255 err_out:
4256 ata_poll_qc_complete(qc);
4257 }
4258
4259
4260 /*
4261 * Execute a 'simple' command that consists only of the opcode 'cmd'
4262 * itself, without filling any other registers.
4263 */
4264 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4265 u8 cmd)
4266 {
4267 struct ata_taskfile tf;
4268 int err;
4269
4270 ata_tf_init(ap, &tf, dev->devno);
4271
4272 tf.command = cmd;
4273 tf.flags |= ATA_TFLAG_DEVICE;
4274 tf.protocol = ATA_PROT_NODATA;
4275
4276 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4277 if (err)
4278 printk(KERN_ERR "%s: ata command failed: %d\n",
4279 __FUNCTION__, err);
4280
4281 return err;
4282 }
4283
4284 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4285 {
4286 u8 cmd;
4287
4288 if (!ata_try_flush_cache(dev))
4289 return 0;
4290
4291 if (ata_id_has_flush_ext(dev->id))
4292 cmd = ATA_CMD_FLUSH_EXT;
4293 else
4294 cmd = ATA_CMD_FLUSH;
4295
4296 return ata_do_simple_cmd(ap, dev, cmd);
4297 }
4298
4299 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4300 {
4301 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4302 }
4303
4304 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4305 {
4306 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4307 }
4308
4309 /**
4310 * ata_device_resume - wake up a previously suspended device
4311 * @ap: port the device is connected to
4312 * @dev: the device to resume
4313 *
4314 * Kick the drive back into action by sending it an idle-immediate
4315 * command and making sure its transfer mode matches between drive
4316 * and host.
4317 *
4318 */
4319 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4320 {
4321 if (ap->flags & ATA_FLAG_SUSPENDED) {
4322 ap->flags &= ~ATA_FLAG_SUSPENDED;
4323 ata_set_mode(ap);
4324 }
4325 if (!ata_dev_present(dev))
4326 return 0;
4327 if (dev->class == ATA_DEV_ATA)
4328 ata_start_drive(ap, dev);
4329
4330 return 0;
4331 }
4332
4333 /**
4334 * ata_device_suspend - prepare a device for suspend
4335 * @ap: port the device is connected to
4336 * @dev: the device to suspend
4337 *
4338 * Flush the cache on the drive, if appropriate, then issue a
4339 * standbynow command.
4340 */
4341 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4342 {
4343 if (!ata_dev_present(dev))
4344 return 0;
4345 if (dev->class == ATA_DEV_ATA)
4346 ata_flush_cache(ap, dev);
4347
4348 ata_standby_drive(ap, dev);
4349 ap->flags |= ATA_FLAG_SUSPENDED;
4350 return 0;
4351 }
4352
4353 /**
4354 * ata_port_start - Set port up for DMA.
4355 * @ap: Port to initialize
4356 *
4357 * Called just after data structures for each port are
4358 * initialized. Allocates space for PRD table.
4359 *
4360 * May be used as the port_start() entry in ata_port_operations.
4361 *
4362 * LOCKING:
4363 * Inherited from caller.
4364 */
4365
4366 int ata_port_start (struct ata_port *ap)
4367 {
4368 struct device *dev = ap->host_set->dev;
4369 int rc;
4370
4371 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4372 if (!ap->prd)
4373 return -ENOMEM;
4374
4375 rc = ata_pad_alloc(ap, dev);
4376 if (rc) {
4377 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4378 return rc;
4379 }
4380
4381 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4382
4383 return 0;
4384 }
4385
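/*
 * Illustrative sketch (not part of libata): a driver that needs per-port
 * DMA buffers of its own can call ata_port_start() from its private
 * port_start() hook and stack its allocations on top of the PRD table.
 * struct foo_port_priv and foo_port_start() are hypothetical.
 */
#if 0
struct foo_port_priv {
	void		*pkt;		/* hypothetical command packet area */
	dma_addr_t	pkt_dma;
};

static int foo_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct foo_port_priv *pp;
	int rc;

	/* let libata allocate the PRD table and padding buffer first */
	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}

	pp->pkt = dma_alloc_coherent(dev, PAGE_SIZE, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}

	ap->private_data = pp;
	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);	/* undo ata_port_start() */
	return rc;
}
#endif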
4386
4387 /**
4388 * ata_port_stop - Undo ata_port_start()
4389 * @ap: Port to shut down
4390 *
4391 * Frees the PRD table.
4392 *
4393 * May be used as the port_stop() entry in ata_port_operations.
4394 *
4395 * LOCKING:
4396 * Inherited from caller.
4397 */
4398
4399 void ata_port_stop (struct ata_port *ap)
4400 {
4401 struct device *dev = ap->host_set->dev;
4402
4403 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4404 ata_pad_free(ap, dev);
4405 }
4406
4407 void ata_host_stop (struct ata_host_set *host_set)
4408 {
4409 if (host_set->mmio_base)
4410 iounmap(host_set->mmio_base);
4411 }
4412
4413
4414 /**
4415 * ata_host_remove - Unregister SCSI host structure with upper layers
4416 * @ap: Port to unregister
4417 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4418 *
4419 * LOCKING:
4420 * Inherited from caller.
4421 */
4422
4423 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4424 {
4425 struct Scsi_Host *sh = ap->host;
4426
4427 DPRINTK("ENTER\n");
4428
4429 if (do_unregister)
4430 scsi_remove_host(sh);
4431
4432 ap->ops->port_stop(ap);
4433 }
4434
4435 /**
4436 * ata_host_init - Initialize an ata_port structure
4437 * @ap: Structure to initialize
4438 * @host: associated SCSI mid-layer structure
4439 * @host_set: Collection of hosts to which @ap belongs
4440 * @ent: Probe information provided by low-level driver
4441 * @port_no: Port number associated with this ata_port
4442 *
4443 * Initialize a new ata_port structure, and its associated
4444 * scsi_host.
4445 *
4446 * LOCKING:
4447 * Inherited from caller.
4448 */
4449
4450 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4451 struct ata_host_set *host_set,
4452 const struct ata_probe_ent *ent, unsigned int port_no)
4453 {
4454 unsigned int i;
4455
4456 host->max_id = 16;
4457 host->max_lun = 1;
4458 host->max_channel = 1;
4459 host->unique_id = ata_unique_id++;
4460 host->max_cmd_len = 12;
4461
4462 ap->flags = ATA_FLAG_PORT_DISABLED;
4463 ap->id = host->unique_id;
4464 ap->host = host;
4465 ap->ctl = ATA_DEVCTL_OBS;
4466 ap->host_set = host_set;
4467 ap->port_no = port_no;
4468 ap->hard_port_no =
4469 ent->legacy_mode ? ent->hard_port_no : port_no;
4470 ap->pio_mask = ent->pio_mask;
4471 ap->mwdma_mask = ent->mwdma_mask;
4472 ap->udma_mask = ent->udma_mask;
4473 ap->flags |= ent->host_flags;
4474 ap->ops = ent->port_ops;
4475 ap->cbl = ATA_CBL_NONE;
4476 ap->active_tag = ATA_TAG_POISON;
4477 ap->last_ctl = 0xFF;
4478
4479 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
4480 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
4481 INIT_LIST_HEAD(&ap->eh_done_q);
4482
4483 for (i = 0; i < ATA_MAX_DEVICES; i++)
4484 ap->device[i].devno = i;
4485
4486 #ifdef ATA_IRQ_TRAP
4487 ap->stats.unhandled_irq = 1;
4488 ap->stats.idle_irq = 1;
4489 #endif
4490
4491 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4492 }
4493
4494 /**
4495 * ata_host_add - Attach low-level ATA driver to system
4496 * @ent: Information provided by low-level driver
4497 * @host_set: Collections of ports to which we add
4498 * @port_no: Port number associated with this host
4499 *
4500 * Attach low-level ATA driver to system.
4501 *
4502 * LOCKING:
4503 * PCI/etc. bus probe sem.
4504 *
4505 * RETURNS:
4506 * New ata_port on success, or NULL on error.
4507 */
4508
4509 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4510 struct ata_host_set *host_set,
4511 unsigned int port_no)
4512 {
4513 struct Scsi_Host *host;
4514 struct ata_port *ap;
4515 int rc;
4516
4517 DPRINTK("ENTER\n");
4518 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4519 if (!host)
4520 return NULL;
4521
4522 ap = (struct ata_port *) &host->hostdata[0];
4523
4524 ata_host_init(ap, host, host_set, ent, port_no);
4525
4526 rc = ap->ops->port_start(ap);
4527 if (rc)
4528 goto err_out;
4529
4530 return ap;
4531
4532 err_out:
4533 scsi_host_put(host);
4534 return NULL;
4535 }
4536
4537 /**
4538 * ata_device_add - Register hardware device with ATA and SCSI layers
4539 * @ent: Probe information describing hardware device to be registered
4540 *
4541 * This function processes the information provided in the probe
4542 * information struct @ent, allocates the necessary ATA and SCSI
4543 * host information structures, initializes them, and registers
4544 * everything with requisite kernel subsystems.
4545 *
4546 * This function requests irqs, probes the ATA bus, and probes
4547 * the SCSI bus.
4548 *
4549 * LOCKING:
4550 * PCI/etc. bus probe sem.
4551 *
4552 * RETURNS:
4553 * Number of ports registered. Zero on error (no ports registered).
4554 */
4555
4556 int ata_device_add(const struct ata_probe_ent *ent)
4557 {
4558 unsigned int count = 0, i;
4559 struct device *dev = ent->dev;
4560 struct ata_host_set *host_set;
4561
4562 DPRINTK("ENTER\n");
4563 /* alloc a container for our list of ATA ports (buses) */
4564 host_set = kzalloc(sizeof(struct ata_host_set) +
4565 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4566 if (!host_set)
4567 return 0;
4568 spin_lock_init(&host_set->lock);
4569
4570 host_set->dev = dev;
4571 host_set->n_ports = ent->n_ports;
4572 host_set->irq = ent->irq;
4573 host_set->mmio_base = ent->mmio_base;
4574 host_set->private_data = ent->private_data;
4575 host_set->ops = ent->port_ops;
4576
4577 /* register each port bound to this device */
4578 for (i = 0; i < ent->n_ports; i++) {
4579 struct ata_port *ap;
4580 unsigned long xfer_mode_mask;
4581
4582 ap = ata_host_add(ent, host_set, i);
4583 if (!ap)
4584 goto err_out;
4585
4586 host_set->ports[i] = ap;
4587 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4588 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4589 (ap->pio_mask << ATA_SHIFT_PIO);
4590
4591 /* print per-port info to dmesg */
4592 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4593 "bmdma 0x%lX irq %lu\n",
4594 ap->id,
4595 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4596 ata_mode_string(xfer_mode_mask),
4597 ap->ioaddr.cmd_addr,
4598 ap->ioaddr.ctl_addr,
4599 ap->ioaddr.bmdma_addr,
4600 ent->irq);
4601
4602 ata_chk_status(ap);
4603 host_set->ops->irq_clear(ap);
4604 count++;
4605 }
4606
4607 if (!count)
4608 goto err_free_ret;
4609
4610 /* obtain irq, which is shared between channels */
4611 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4612 DRV_NAME, host_set))
4613 goto err_out;
4614
4615 /* perform each probe synchronously */
4616 DPRINTK("probe begin\n");
4617 for (i = 0; i < count; i++) {
4618 struct ata_port *ap;
4619 int rc;
4620
4621 ap = host_set->ports[i];
4622
4623 DPRINTK("ata%u: bus probe begin\n", ap->id);
4624 rc = ata_bus_probe(ap);
4625 DPRINTK("ata%u: bus probe end\n", ap->id);
4626
4627 if (rc) {
4628 /* FIXME: do something useful here?
4629 * Current libata behavior will
4630 * tear down everything when
4631 * the module is removed
4632 * or the h/w is unplugged.
4633 */
4634 }
4635
4636 rc = scsi_add_host(ap->host, dev);
4637 if (rc) {
4638 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4639 ap->id);
4640 /* FIXME: do something useful here */
4641 /* FIXME: handle unconditional calls to
4642 * scsi_scan_host and ata_host_remove, below,
4643 * at the very least
4644 */
4645 }
4646 }
4647
4648 /* probes are done, now scan each port's disk(s) */
4649 DPRINTK("host probe begin\n");
4650 for (i = 0; i < count; i++) {
4651 struct ata_port *ap = host_set->ports[i];
4652
4653 ata_scsi_scan_host(ap);
4654 }
4655
4656 dev_set_drvdata(dev, host_set);
4657
4658 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4659 return ent->n_ports; /* success */
4660
4661 err_out:
4662 for (i = 0; i < count; i++) {
4663 ata_host_remove(host_set->ports[i], 1);
4664 scsi_host_put(host_set->ports[i]->host);
4665 }
4666 err_free_ret:
4667 kfree(host_set);
4668 VPRINTK("EXIT, returning 0\n");
4669 return 0;
4670 }
4671
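/*
 * Illustrative sketch (not part of libata): a non-PCI low-level driver
 * registers itself by filling in an ata_probe_ent and handing it to
 * ata_device_add().  The FOO_* addresses, masks and foo_* names below are
 * hypothetical; PCI drivers normally get the same effect via
 * ata_pci_init_one()/ata_pci_init_native_mode().
 */
#if 0
static int __init foo_probe(struct device *dev)
{
	struct ata_probe_ent *probe_ent;
	int nr;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->dev		= dev;
	probe_ent->sht		= &foo_sht;		/* scsi_host_template */
	probe_ent->port_ops	= &foo_port_ops;	/* ata_port_operations */
	probe_ent->n_ports	= 1;
	probe_ent->irq		= FOO_IRQ;		/* hypothetical */
	probe_ent->irq_flags	= SA_SHIRQ;
	probe_ent->host_flags	= ATA_FLAG_SRST;
	probe_ent->pio_mask	= 0x1f;			/* PIO 0-4 */
	probe_ent->mwdma_mask	= 0x07;			/* MWDMA 0-2 */
	probe_ent->udma_mask	= 0x7f;			/* UDMA 0-6 */

	probe_ent->port[0].cmd_addr = FOO_CMD_BASE;	/* hypothetical */
	probe_ent->port[0].altstatus_addr =
	probe_ent->port[0].ctl_addr = FOO_CTL_BASE;
	probe_ent->port[0].bmdma_addr = FOO_BMDMA_BASE;
	ata_std_ports(&probe_ent->port[0]);

	nr = ata_device_add(probe_ent);
	kfree(probe_ent);		/* libata keeps its own copies */

	return nr ? 0 : -ENODEV;
}
#endif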
4672 /**
4673 * ata_host_set_remove - PCI layer callback for device removal
4674 * @host_set: ATA host set that was removed
4675 *
4676 * Unregister all objects associated with this host set. Free those
4677 * objects.
4678 *
4679 * LOCKING:
4680 * Inherited from calling layer (may sleep).
4681 */
4682
4683 void ata_host_set_remove(struct ata_host_set *host_set)
4684 {
4685 struct ata_port *ap;
4686 unsigned int i;
4687
4688 for (i = 0; i < host_set->n_ports; i++) {
4689 ap = host_set->ports[i];
4690 scsi_remove_host(ap->host);
4691 }
4692
4693 free_irq(host_set->irq, host_set);
4694
4695 for (i = 0; i < host_set->n_ports; i++) {
4696 ap = host_set->ports[i];
4697
4698 ata_scsi_release(ap->host);
4699
4700 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4701 struct ata_ioports *ioaddr = &ap->ioaddr;
4702
4703 if (ioaddr->cmd_addr == 0x1f0)
4704 release_region(0x1f0, 8);
4705 else if (ioaddr->cmd_addr == 0x170)
4706 release_region(0x170, 8);
4707 }
4708
4709 scsi_host_put(ap->host);
4710 }
4711
4712 if (host_set->ops->host_stop)
4713 host_set->ops->host_stop(host_set);
4714
4715 kfree(host_set);
4716 }
4717
4718 /**
4719 * ata_scsi_release - SCSI layer callback hook for host unload
4720 * @host: libata host to be unloaded
4721 *
4722 * Performs all duties necessary to shut down a libata port...
4723 * Kill port kthread, disable port, and release resources.
4724 *
4725 * LOCKING:
4726 * Inherited from SCSI layer.
4727 *
4728 * RETURNS:
4729 * One.
4730 */
4731
4732 int ata_scsi_release(struct Scsi_Host *host)
4733 {
4734 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4735
4736 DPRINTK("ENTER\n");
4737
4738 ap->ops->port_disable(ap);
4739 ata_host_remove(ap, 0);
4740
4741 DPRINTK("EXIT\n");
4742 return 1;
4743 }
4744
4745 /**
4746 * ata_std_ports - initialize ioaddr with standard port offsets.
4747 * @ioaddr: IO address structure to be initialized
4748 *
4749 * Utility function which initializes data_addr, error_addr,
4750 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4751 * device_addr, status_addr, and command_addr to standard offsets
4752 * relative to cmd_addr.
4753 *
4754 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4755 */
4756
4757 void ata_std_ports(struct ata_ioports *ioaddr)
4758 {
4759 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4760 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4761 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4762 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4763 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4764 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4765 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4766 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4767 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4768 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4769 }
4770
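/*
 * Illustrative sketch (not part of libata): for a legacy primary channel
 * at the classic 0x1f0/0x3f6 addresses, a driver only needs to fill in
 * cmd_addr and ctl_addr and let ata_std_ports() derive the remaining
 * taskfile register addresses.  foo_setup_legacy_primary() is hypothetical.
 */
#if 0
static void foo_setup_legacy_primary(struct ata_ioports *ioaddr)
{
	ioaddr->cmd_addr = 0x1f0;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = 0x3f6;
	ata_std_ports(ioaddr);	/* derive data/error/.../command from cmd_addr */
}
#endif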
4771
4772 #ifdef CONFIG_PCI
4773
4774 void ata_pci_host_stop (struct ata_host_set *host_set)
4775 {
4776 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4777
4778 pci_iounmap(pdev, host_set->mmio_base);
4779 }
4780
4781 /**
4782 * ata_pci_remove_one - PCI layer callback for device removal
4783 * @pdev: PCI device that was removed
4784 *
4785 * PCI layer indicates to libata via this hook that
4786 * hot-unplug or module unload event has occurred.
4787 * Handle this by unregistering all objects associated
4788 * with this PCI device. Free those objects. Then finally
4789 * release PCI resources and disable device.
4790 *
4791 * LOCKING:
4792 * Inherited from PCI layer (may sleep).
4793 */
4794
4795 void ata_pci_remove_one (struct pci_dev *pdev)
4796 {
4797 struct device *dev = pci_dev_to_dev(pdev);
4798 struct ata_host_set *host_set = dev_get_drvdata(dev);
4799
4800 ata_host_set_remove(host_set);
4801 pci_release_regions(pdev);
4802 pci_disable_device(pdev);
4803 dev_set_drvdata(dev, NULL);
4804 }
4805
4806 /* move to PCI subsystem */
4807 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4808 {
4809 unsigned long tmp = 0;
4810
4811 switch (bits->width) {
4812 case 1: {
4813 u8 tmp8 = 0;
4814 pci_read_config_byte(pdev, bits->reg, &tmp8);
4815 tmp = tmp8;
4816 break;
4817 }
4818 case 2: {
4819 u16 tmp16 = 0;
4820 pci_read_config_word(pdev, bits->reg, &tmp16);
4821 tmp = tmp16;
4822 break;
4823 }
4824 case 4: {
4825 u32 tmp32 = 0;
4826 pci_read_config_dword(pdev, bits->reg, &tmp32);
4827 tmp = tmp32;
4828 break;
4829 }
4830
4831 default:
4832 return -EINVAL;
4833 }
4834
4835 tmp &= bits->mask;
4836
4837 return (tmp == bits->val) ? 1 : 0;
4838 }
4839
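/*
 * Illustrative sketch (not part of libata): pci_test_config_bits() is
 * typically used in a driver's probe/reset path to check whether the BIOS
 * left a channel enabled before touching it.  The register offset, width,
 * mask and value in foo_enable_bits[] are hypothetical.
 */
#if 0
static const struct pci_bits foo_enable_bits[] = {
	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* hypothetical: port 0 enable bit */
	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* hypothetical: port 1 enable bit */
};

static int foo_channel_enabled(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);

	/* returns 1 if the masked config bits match, 0 if not, <0 on error */
	return pci_test_config_bits(pdev, &foo_enable_bits[ap->hard_port_no]);
}
#endif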
4840 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4841 {
4842 pci_save_state(pdev);
4843 pci_disable_device(pdev);
4844 pci_set_power_state(pdev, PCI_D3hot);
4845 return 0;
4846 }
4847
4848 int ata_pci_device_resume(struct pci_dev *pdev)
4849 {
4850 pci_set_power_state(pdev, PCI_D0);
4851 pci_restore_state(pdev);
4852 pci_enable_device(pdev);
4853 pci_set_master(pdev);
4854 return 0;
4855 }
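/*
 * Illustrative sketch (not part of libata): ata_pci_remove_one() and the
 * suspend/resume helpers above are meant to be used directly as pci_driver
 * methods.  foo_pci_tbl and foo_init_one are hypothetical; foo_init_one
 * would normally end by calling ata_pci_init_one().
 */
#if 0
static struct pci_driver foo_pci_driver = {
	.name			= "foo_ata",
	.id_table		= foo_pci_tbl,
	.probe			= foo_init_one,
	.remove			= ata_pci_remove_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
};
#endif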
4856 #endif /* CONFIG_PCI */
4857
4858
4859 static int __init ata_init(void)
4860 {
4861 ata_wq = create_workqueue("ata");
4862 if (!ata_wq)
4863 return -ENOMEM;
4864
4865 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4866 return 0;
4867 }
4868
4869 static void __exit ata_exit(void)
4870 {
4871 destroy_workqueue(ata_wq);
4872 }
4873
4874 module_init(ata_init);
4875 module_exit(ata_exit);
4876
4877 static unsigned long ratelimit_time;
4878 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4879
4880 int ata_ratelimit(void)
4881 {
4882 int rc;
4883 unsigned long flags;
4884
4885 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4886
4887 if (time_after(jiffies, ratelimit_time)) {
4888 rc = 1;
4889 ratelimit_time = jiffies + (HZ/5);
4890 } else
4891 rc = 0;
4892
4893 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4894
4895 return rc;
4896 }
4897
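/*
 * Illustrative sketch (not part of libata): ata_ratelimit() caps noisy
 * messages in interrupt/error paths to roughly five per second (HZ/5
 * between messages).  foo_warn_spurious_irq() and its arguments are
 * hypothetical.
 */
#if 0
static void foo_warn_spurious_irq(struct ata_port *ap, u8 status)
{
	/* allow at most ~5 of these warnings per second */
	if (ata_ratelimit())
		printk(KERN_WARNING "ata%u: spurious interrupt (status 0x%x)\n",
		       ap->id, status);
}
#endif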
4898 /*
4899 * libata is essentially a library of internal helper functions for
4900 * low-level ATA host controller drivers. As such, the API/ABI is
4901 * likely to change as new drivers are added and updated.
4902 * Do not depend on ABI/API stability.
4903 */
4904
4905 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4906 EXPORT_SYMBOL_GPL(ata_std_ports);
4907 EXPORT_SYMBOL_GPL(ata_device_add);
4908 EXPORT_SYMBOL_GPL(ata_host_set_remove);
4909 EXPORT_SYMBOL_GPL(ata_sg_init);
4910 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4911 EXPORT_SYMBOL_GPL(__ata_qc_complete);
4912 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4913 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4914 EXPORT_SYMBOL_GPL(ata_tf_load);
4915 EXPORT_SYMBOL_GPL(ata_tf_read);
4916 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4917 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4918 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4919 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4920 EXPORT_SYMBOL_GPL(ata_check_status);
4921 EXPORT_SYMBOL_GPL(ata_altstatus);
4922 EXPORT_SYMBOL_GPL(ata_exec_command);
4923 EXPORT_SYMBOL_GPL(ata_port_start);
4924 EXPORT_SYMBOL_GPL(ata_port_stop);
4925 EXPORT_SYMBOL_GPL(ata_host_stop);
4926 EXPORT_SYMBOL_GPL(ata_interrupt);
4927 EXPORT_SYMBOL_GPL(ata_qc_prep);
4928 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4929 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4930 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4931 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4932 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4933 EXPORT_SYMBOL_GPL(ata_port_probe);
4934 EXPORT_SYMBOL_GPL(sata_phy_reset);
4935 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4936 EXPORT_SYMBOL_GPL(ata_bus_reset);
4937 EXPORT_SYMBOL_GPL(ata_std_probeinit);
4938 EXPORT_SYMBOL_GPL(ata_std_softreset);
4939 EXPORT_SYMBOL_GPL(sata_std_hardreset);
4940 EXPORT_SYMBOL_GPL(ata_std_postreset);
4941 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4942 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4943 EXPORT_SYMBOL_GPL(ata_port_disable);
4944 EXPORT_SYMBOL_GPL(ata_ratelimit);
4945 EXPORT_SYMBOL_GPL(ata_busy_sleep);
4946 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4947 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4948 EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
4949 EXPORT_SYMBOL_GPL(ata_scsi_error);
4950 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4951 EXPORT_SYMBOL_GPL(ata_scsi_release);
4952 EXPORT_SYMBOL_GPL(ata_host_intr);
4953 EXPORT_SYMBOL_GPL(ata_dev_classify);
4954 EXPORT_SYMBOL_GPL(ata_id_string);
4955 EXPORT_SYMBOL_GPL(ata_id_c_string);
4956 EXPORT_SYMBOL_GPL(ata_dev_config);
4957 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4958 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4959 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4960
4961 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4962 EXPORT_SYMBOL_GPL(ata_timing_compute);
4963 EXPORT_SYMBOL_GPL(ata_timing_merge);
4964
4965 #ifdef CONFIG_PCI
4966 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4967 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
4968 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4969 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4970 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4971 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
4972 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
4973 #endif /* CONFIG_PCI */
4974
4975 EXPORT_SYMBOL_GPL(ata_device_suspend);
4976 EXPORT_SYMBOL_GPL(ata_device_resume);
4977 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4978 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);