]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - drivers/scsi/libata-core.c
[PATCH] libata: fix ata_qc_issue failure path
[mirror_ubuntu-zesty-kernel.git] / drivers / scsi / libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev,
66 u16 heads,
67 u16 sectors);
68 static void ata_set_mode(struct ata_port *ap);
69 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
70 struct ata_device *dev);
71 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
72
73 static unsigned int ata_unique_id = 1;
74 static struct workqueue_struct *ata_wq;
75
76 int atapi_enabled = 1;
77 module_param(atapi_enabled, int, 0444);
78 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
79
80 int libata_fua = 0;
81 module_param_named(fua, libata_fua, int, 0444);
82 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
83
84 MODULE_AUTHOR("Jeff Garzik");
85 MODULE_DESCRIPTION("Library module for ATA devices");
86 MODULE_LICENSE("GPL");
87 MODULE_VERSION(DRV_VERSION);
88
89
90 /**
91 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
92 * @tf: Taskfile to convert
93 * @fis: Buffer into which data will output
94 * @pmp: Port multiplier port
95 *
96 * Converts a standard ATA taskfile to a Serial ATA
97 * FIS structure (Register - Host to Device).
98 *
99 * LOCKING:
100 * Inherited from caller.
101 */
102
103 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
104 {
105 fis[0] = 0x27; /* Register - Host to Device FIS */
106 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
107 bit 7 indicates Command FIS */
108 fis[2] = tf->command;
109 fis[3] = tf->feature;
110
111 fis[4] = tf->lbal;
112 fis[5] = tf->lbam;
113 fis[6] = tf->lbah;
114 fis[7] = tf->device;
115
116 fis[8] = tf->hob_lbal;
117 fis[9] = tf->hob_lbam;
118 fis[10] = tf->hob_lbah;
119 fis[11] = tf->hob_feature;
120
121 fis[12] = tf->nsect;
122 fis[13] = tf->hob_nsect;
123 fis[14] = 0;
124 fis[15] = tf->ctl;
125
126 fis[16] = 0;
127 fis[17] = 0;
128 fis[18] = 0;
129 fis[19] = 0;
130 }
131
132 /**
133 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
134 * @fis: Buffer from which data will be input
135 * @tf: Taskfile to output
136 *
137 * Converts a serial ATA FIS structure to a standard ATA taskfile.
138 *
139 * LOCKING:
140 * Inherited from caller.
141 */
142
143 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
144 {
145 tf->command = fis[2]; /* status */
146 tf->feature = fis[3]; /* error */
147
148 tf->lbal = fis[4];
149 tf->lbam = fis[5];
150 tf->lbah = fis[6];
151 tf->device = fis[7];
152
153 tf->hob_lbal = fis[8];
154 tf->hob_lbam = fis[9];
155 tf->hob_lbah = fis[10];
156
157 tf->nsect = fis[12];
158 tf->hob_nsect = fis[13];
159 }
160
161 static const u8 ata_rw_cmds[] = {
162 /* pio multi */
163 ATA_CMD_READ_MULTI,
164 ATA_CMD_WRITE_MULTI,
165 ATA_CMD_READ_MULTI_EXT,
166 ATA_CMD_WRITE_MULTI_EXT,
167 0,
168 0,
169 0,
170 ATA_CMD_WRITE_MULTI_FUA_EXT,
171 /* pio */
172 ATA_CMD_PIO_READ,
173 ATA_CMD_PIO_WRITE,
174 ATA_CMD_PIO_READ_EXT,
175 ATA_CMD_PIO_WRITE_EXT,
176 0,
177 0,
178 0,
179 0,
180 /* dma */
181 ATA_CMD_READ,
182 ATA_CMD_WRITE,
183 ATA_CMD_READ_EXT,
184 ATA_CMD_WRITE_EXT,
185 0,
186 0,
187 0,
188 ATA_CMD_WRITE_FUA_EXT
189 };
190
191 /**
192 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
193 * @qc: command to examine and configure
194 *
195 * Examine the device configuration and tf->flags to calculate
196 * the proper read/write commands and protocol to use.
197 *
198 * LOCKING:
199 * caller.
200 */
201 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
202 {
203 struct ata_taskfile *tf = &qc->tf;
204 struct ata_device *dev = qc->dev;
205 u8 cmd;
206
207 int index, fua, lba48, write;
208
209 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
210 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
211 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
212
213 if (dev->flags & ATA_DFLAG_PIO) {
214 tf->protocol = ATA_PROT_PIO;
215 index = dev->multi_count ? 0 : 8;
216 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
217 /* Unable to use DMA due to host limitation */
218 tf->protocol = ATA_PROT_PIO;
219 index = dev->multi_count ? 0 : 8;
220 } else {
221 tf->protocol = ATA_PROT_DMA;
222 index = 16;
223 }
224
225 cmd = ata_rw_cmds[index + fua + lba48 + write];
226 if (cmd) {
227 tf->command = cmd;
228 return 0;
229 }
230 return -1;
231 }
232
233 /**
234 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
235 * @pio_mask: pio_mask
236 * @mwdma_mask: mwdma_mask
237 * @udma_mask: udma_mask
238 *
239 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
240 * unsigned int xfer_mask.
241 *
242 * LOCKING:
243 * None.
244 *
245 * RETURNS:
246 * Packed xfer_mask.
247 */
248 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
249 unsigned int mwdma_mask,
250 unsigned int udma_mask)
251 {
252 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
253 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
254 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
255 }
256
257 /**
258 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
259 * @xfer_mask: xfer_mask to unpack
260 * @pio_mask: resulting pio_mask
261 * @mwdma_mask: resulting mwdma_mask
262 * @udma_mask: resulting udma_mask
263 *
264 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
265 * Any NULL distination masks will be ignored.
266 */
267 static void ata_unpack_xfermask(unsigned int xfer_mask,
268 unsigned int *pio_mask,
269 unsigned int *mwdma_mask,
270 unsigned int *udma_mask)
271 {
272 if (pio_mask)
273 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
274 if (mwdma_mask)
275 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
276 if (udma_mask)
277 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
278 }
279
280 static const struct ata_xfer_ent {
281 unsigned int shift, bits;
282 u8 base;
283 } ata_xfer_tbl[] = {
284 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
285 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
286 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
287 { -1, },
288 };
289
290 /**
291 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
292 * @xfer_mask: xfer_mask of interest
293 *
294 * Return matching XFER_* value for @xfer_mask. Only the highest
295 * bit of @xfer_mask is considered.
296 *
297 * LOCKING:
298 * None.
299 *
300 * RETURNS:
301 * Matching XFER_* value, 0 if no match found.
302 */
303 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
304 {
305 int highbit = fls(xfer_mask) - 1;
306 const struct ata_xfer_ent *ent;
307
308 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
309 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
310 return ent->base + highbit - ent->shift;
311 return 0;
312 }
313
314 /**
315 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
316 * @xfer_mode: XFER_* of interest
317 *
318 * Return matching xfer_mask for @xfer_mode.
319 *
320 * LOCKING:
321 * None.
322 *
323 * RETURNS:
324 * Matching xfer_mask, 0 if no match found.
325 */
326 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
327 {
328 const struct ata_xfer_ent *ent;
329
330 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
331 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
332 return 1 << (ent->shift + xfer_mode - ent->base);
333 return 0;
334 }
335
336 /**
337 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
338 * @xfer_mode: XFER_* of interest
339 *
340 * Return matching xfer_shift for @xfer_mode.
341 *
342 * LOCKING:
343 * None.
344 *
345 * RETURNS:
346 * Matching xfer_shift, -1 if no match found.
347 */
348 static int ata_xfer_mode2shift(unsigned int xfer_mode)
349 {
350 const struct ata_xfer_ent *ent;
351
352 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
353 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
354 return ent->shift;
355 return -1;
356 }
357
358 /**
359 * ata_mode_string - convert xfer_mask to string
360 * @xfer_mask: mask of bits supported; only highest bit counts.
361 *
362 * Determine string which represents the highest speed
363 * (highest bit in @modemask).
364 *
365 * LOCKING:
366 * None.
367 *
368 * RETURNS:
369 * Constant C string representing highest speed listed in
370 * @mode_mask, or the constant C string "<n/a>".
371 */
372 static const char *ata_mode_string(unsigned int xfer_mask)
373 {
374 static const char * const xfer_mode_str[] = {
375 "PIO0",
376 "PIO1",
377 "PIO2",
378 "PIO3",
379 "PIO4",
380 "MWDMA0",
381 "MWDMA1",
382 "MWDMA2",
383 "UDMA/16",
384 "UDMA/25",
385 "UDMA/33",
386 "UDMA/44",
387 "UDMA/66",
388 "UDMA/100",
389 "UDMA/133",
390 "UDMA7",
391 };
392 int highbit;
393
394 highbit = fls(xfer_mask) - 1;
395 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
396 return xfer_mode_str[highbit];
397 return "<n/a>";
398 }
399
400 static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
401 {
402 if (ata_dev_present(dev)) {
403 printk(KERN_WARNING "ata%u: dev %u disabled\n",
404 ap->id, dev->devno);
405 dev->class++;
406 }
407 }
408
409 /**
410 * ata_pio_devchk - PATA device presence detection
411 * @ap: ATA channel to examine
412 * @device: Device to examine (starting at zero)
413 *
414 * This technique was originally described in
415 * Hale Landis's ATADRVR (www.ata-atapi.com), and
416 * later found its way into the ATA/ATAPI spec.
417 *
418 * Write a pattern to the ATA shadow registers,
419 * and if a device is present, it will respond by
420 * correctly storing and echoing back the
421 * ATA shadow register contents.
422 *
423 * LOCKING:
424 * caller.
425 */
426
427 static unsigned int ata_pio_devchk(struct ata_port *ap,
428 unsigned int device)
429 {
430 struct ata_ioports *ioaddr = &ap->ioaddr;
431 u8 nsect, lbal;
432
433 ap->ops->dev_select(ap, device);
434
435 outb(0x55, ioaddr->nsect_addr);
436 outb(0xaa, ioaddr->lbal_addr);
437
438 outb(0xaa, ioaddr->nsect_addr);
439 outb(0x55, ioaddr->lbal_addr);
440
441 outb(0x55, ioaddr->nsect_addr);
442 outb(0xaa, ioaddr->lbal_addr);
443
444 nsect = inb(ioaddr->nsect_addr);
445 lbal = inb(ioaddr->lbal_addr);
446
447 if ((nsect == 0x55) && (lbal == 0xaa))
448 return 1; /* we found a device */
449
450 return 0; /* nothing found */
451 }
452
453 /**
454 * ata_mmio_devchk - PATA device presence detection
455 * @ap: ATA channel to examine
456 * @device: Device to examine (starting at zero)
457 *
458 * This technique was originally described in
459 * Hale Landis's ATADRVR (www.ata-atapi.com), and
460 * later found its way into the ATA/ATAPI spec.
461 *
462 * Write a pattern to the ATA shadow registers,
463 * and if a device is present, it will respond by
464 * correctly storing and echoing back the
465 * ATA shadow register contents.
466 *
467 * LOCKING:
468 * caller.
469 */
470
471 static unsigned int ata_mmio_devchk(struct ata_port *ap,
472 unsigned int device)
473 {
474 struct ata_ioports *ioaddr = &ap->ioaddr;
475 u8 nsect, lbal;
476
477 ap->ops->dev_select(ap, device);
478
479 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
480 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
481
482 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
483 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
484
485 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
486 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
487
488 nsect = readb((void __iomem *) ioaddr->nsect_addr);
489 lbal = readb((void __iomem *) ioaddr->lbal_addr);
490
491 if ((nsect == 0x55) && (lbal == 0xaa))
492 return 1; /* we found a device */
493
494 return 0; /* nothing found */
495 }
496
497 /**
498 * ata_devchk - PATA device presence detection
499 * @ap: ATA channel to examine
500 * @device: Device to examine (starting at zero)
501 *
502 * Dispatch ATA device presence detection, depending
503 * on whether we are using PIO or MMIO to talk to the
504 * ATA shadow registers.
505 *
506 * LOCKING:
507 * caller.
508 */
509
510 static unsigned int ata_devchk(struct ata_port *ap,
511 unsigned int device)
512 {
513 if (ap->flags & ATA_FLAG_MMIO)
514 return ata_mmio_devchk(ap, device);
515 return ata_pio_devchk(ap, device);
516 }
517
518 /**
519 * ata_dev_classify - determine device type based on ATA-spec signature
520 * @tf: ATA taskfile register set for device to be identified
521 *
522 * Determine from taskfile register contents whether a device is
523 * ATA or ATAPI, as per "Signature and persistence" section
524 * of ATA/PI spec (volume 1, sect 5.14).
525 *
526 * LOCKING:
527 * None.
528 *
529 * RETURNS:
530 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
531 * the event of failure.
532 */
533
534 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
535 {
536 /* Apple's open source Darwin code hints that some devices only
537 * put a proper signature into the LBA mid/high registers,
538 * So, we only check those. It's sufficient for uniqueness.
539 */
540
541 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
542 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
543 DPRINTK("found ATA device by sig\n");
544 return ATA_DEV_ATA;
545 }
546
547 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
548 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
549 DPRINTK("found ATAPI device by sig\n");
550 return ATA_DEV_ATAPI;
551 }
552
553 DPRINTK("unknown device\n");
554 return ATA_DEV_UNKNOWN;
555 }
556
557 /**
558 * ata_dev_try_classify - Parse returned ATA device signature
559 * @ap: ATA channel to examine
560 * @device: Device to examine (starting at zero)
561 * @r_err: Value of error register on completion
562 *
563 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
564 * an ATA/ATAPI-defined set of values is placed in the ATA
565 * shadow registers, indicating the results of device detection
566 * and diagnostics.
567 *
568 * Select the ATA device, and read the values from the ATA shadow
569 * registers. Then parse according to the Error register value,
570 * and the spec-defined values examined by ata_dev_classify().
571 *
572 * LOCKING:
573 * caller.
574 *
575 * RETURNS:
576 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
577 */
578
579 static unsigned int
580 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
581 {
582 struct ata_taskfile tf;
583 unsigned int class;
584 u8 err;
585
586 ap->ops->dev_select(ap, device);
587
588 memset(&tf, 0, sizeof(tf));
589
590 ap->ops->tf_read(ap, &tf);
591 err = tf.feature;
592 if (r_err)
593 *r_err = err;
594
595 /* see if device passed diags */
596 if (err == 1)
597 /* do nothing */ ;
598 else if ((device == 0) && (err == 0x81))
599 /* do nothing */ ;
600 else
601 return ATA_DEV_NONE;
602
603 /* determine if device is ATA or ATAPI */
604 class = ata_dev_classify(&tf);
605
606 if (class == ATA_DEV_UNKNOWN)
607 return ATA_DEV_NONE;
608 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
609 return ATA_DEV_NONE;
610 return class;
611 }
612
613 /**
614 * ata_id_string - Convert IDENTIFY DEVICE page into string
615 * @id: IDENTIFY DEVICE results we will examine
616 * @s: string into which data is output
617 * @ofs: offset into identify device page
618 * @len: length of string to return. must be an even number.
619 *
620 * The strings in the IDENTIFY DEVICE page are broken up into
621 * 16-bit chunks. Run through the string, and output each
622 * 8-bit chunk linearly, regardless of platform.
623 *
624 * LOCKING:
625 * caller.
626 */
627
628 void ata_id_string(const u16 *id, unsigned char *s,
629 unsigned int ofs, unsigned int len)
630 {
631 unsigned int c;
632
633 while (len > 0) {
634 c = id[ofs] >> 8;
635 *s = c;
636 s++;
637
638 c = id[ofs] & 0xff;
639 *s = c;
640 s++;
641
642 ofs++;
643 len -= 2;
644 }
645 }
646
647 /**
648 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
649 * @id: IDENTIFY DEVICE results we will examine
650 * @s: string into which data is output
651 * @ofs: offset into identify device page
652 * @len: length of string to return. must be an odd number.
653 *
654 * This function is identical to ata_id_string except that it
655 * trims trailing spaces and terminates the resulting string with
656 * null. @len must be actual maximum length (even number) + 1.
657 *
658 * LOCKING:
659 * caller.
660 */
661 void ata_id_c_string(const u16 *id, unsigned char *s,
662 unsigned int ofs, unsigned int len)
663 {
664 unsigned char *p;
665
666 WARN_ON(!(len & 1));
667
668 ata_id_string(id, s, ofs, len - 1);
669
670 p = s + strnlen(s, len - 1);
671 while (p > s && p[-1] == ' ')
672 p--;
673 *p = '\0';
674 }
675
676 static u64 ata_id_n_sectors(const u16 *id)
677 {
678 if (ata_id_has_lba(id)) {
679 if (ata_id_has_lba48(id))
680 return ata_id_u64(id, 100);
681 else
682 return ata_id_u32(id, 60);
683 } else {
684 if (ata_id_current_chs_valid(id))
685 return ata_id_u32(id, 57);
686 else
687 return id[1] * id[3] * id[6];
688 }
689 }
690
691 /**
692 * ata_noop_dev_select - Select device 0/1 on ATA bus
693 * @ap: ATA channel to manipulate
694 * @device: ATA device (numbered from zero) to select
695 *
696 * This function performs no actual function.
697 *
698 * May be used as the dev_select() entry in ata_port_operations.
699 *
700 * LOCKING:
701 * caller.
702 */
703 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
704 {
705 }
706
707
708 /**
709 * ata_std_dev_select - Select device 0/1 on ATA bus
710 * @ap: ATA channel to manipulate
711 * @device: ATA device (numbered from zero) to select
712 *
713 * Use the method defined in the ATA specification to
714 * make either device 0, or device 1, active on the
715 * ATA channel. Works with both PIO and MMIO.
716 *
717 * May be used as the dev_select() entry in ata_port_operations.
718 *
719 * LOCKING:
720 * caller.
721 */
722
723 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
724 {
725 u8 tmp;
726
727 if (device == 0)
728 tmp = ATA_DEVICE_OBS;
729 else
730 tmp = ATA_DEVICE_OBS | ATA_DEV1;
731
732 if (ap->flags & ATA_FLAG_MMIO) {
733 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
734 } else {
735 outb(tmp, ap->ioaddr.device_addr);
736 }
737 ata_pause(ap); /* needed; also flushes, for mmio */
738 }
739
740 /**
741 * ata_dev_select - Select device 0/1 on ATA bus
742 * @ap: ATA channel to manipulate
743 * @device: ATA device (numbered from zero) to select
744 * @wait: non-zero to wait for Status register BSY bit to clear
745 * @can_sleep: non-zero if context allows sleeping
746 *
747 * Use the method defined in the ATA specification to
748 * make either device 0, or device 1, active on the
749 * ATA channel.
750 *
751 * This is a high-level version of ata_std_dev_select(),
752 * which additionally provides the services of inserting
753 * the proper pauses and status polling, where needed.
754 *
755 * LOCKING:
756 * caller.
757 */
758
759 void ata_dev_select(struct ata_port *ap, unsigned int device,
760 unsigned int wait, unsigned int can_sleep)
761 {
762 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
763 ap->id, device, wait);
764
765 if (wait)
766 ata_wait_idle(ap);
767
768 ap->ops->dev_select(ap, device);
769
770 if (wait) {
771 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
772 msleep(150);
773 ata_wait_idle(ap);
774 }
775 }
776
777 /**
778 * ata_dump_id - IDENTIFY DEVICE info debugging output
779 * @id: IDENTIFY DEVICE page to dump
780 *
781 * Dump selected 16-bit words from the given IDENTIFY DEVICE
782 * page.
783 *
784 * LOCKING:
785 * caller.
786 */
787
788 static inline void ata_dump_id(const u16 *id)
789 {
790 DPRINTK("49==0x%04x "
791 "53==0x%04x "
792 "63==0x%04x "
793 "64==0x%04x "
794 "75==0x%04x \n",
795 id[49],
796 id[53],
797 id[63],
798 id[64],
799 id[75]);
800 DPRINTK("80==0x%04x "
801 "81==0x%04x "
802 "82==0x%04x "
803 "83==0x%04x "
804 "84==0x%04x \n",
805 id[80],
806 id[81],
807 id[82],
808 id[83],
809 id[84]);
810 DPRINTK("88==0x%04x "
811 "93==0x%04x\n",
812 id[88],
813 id[93]);
814 }
815
816 /**
817 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
818 * @id: IDENTIFY data to compute xfer mask from
819 *
820 * Compute the xfermask for this device. This is not as trivial
821 * as it seems if we must consider early devices correctly.
822 *
823 * FIXME: pre IDE drive timing (do we care ?).
824 *
825 * LOCKING:
826 * None.
827 *
828 * RETURNS:
829 * Computed xfermask
830 */
831 static unsigned int ata_id_xfermask(const u16 *id)
832 {
833 unsigned int pio_mask, mwdma_mask, udma_mask;
834
835 /* Usual case. Word 53 indicates word 64 is valid */
836 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
837 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
838 pio_mask <<= 3;
839 pio_mask |= 0x7;
840 } else {
841 /* If word 64 isn't valid then Word 51 high byte holds
842 * the PIO timing number for the maximum. Turn it into
843 * a mask.
844 */
845 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
846
847 /* But wait.. there's more. Design your standards by
848 * committee and you too can get a free iordy field to
849 * process. However its the speeds not the modes that
850 * are supported... Note drivers using the timing API
851 * will get this right anyway
852 */
853 }
854
855 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
856
857 udma_mask = 0;
858 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
859 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
860
861 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
862 }
863
864 /**
865 * ata_port_queue_task - Queue port_task
866 * @ap: The ata_port to queue port_task for
867 *
868 * Schedule @fn(@data) for execution after @delay jiffies using
869 * port_task. There is one port_task per port and it's the
870 * user(low level driver)'s responsibility to make sure that only
871 * one task is active at any given time.
872 *
873 * libata core layer takes care of synchronization between
874 * port_task and EH. ata_port_queue_task() may be ignored for EH
875 * synchronization.
876 *
877 * LOCKING:
878 * Inherited from caller.
879 */
880 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
881 unsigned long delay)
882 {
883 int rc;
884
885 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
886 return;
887
888 PREPARE_WORK(&ap->port_task, fn, data);
889
890 if (!delay)
891 rc = queue_work(ata_wq, &ap->port_task);
892 else
893 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
894
895 /* rc == 0 means that another user is using port task */
896 WARN_ON(rc == 0);
897 }
898
899 /**
900 * ata_port_flush_task - Flush port_task
901 * @ap: The ata_port to flush port_task for
902 *
903 * After this function completes, port_task is guranteed not to
904 * be running or scheduled.
905 *
906 * LOCKING:
907 * Kernel thread context (may sleep)
908 */
909 void ata_port_flush_task(struct ata_port *ap)
910 {
911 unsigned long flags;
912
913 DPRINTK("ENTER\n");
914
915 spin_lock_irqsave(&ap->host_set->lock, flags);
916 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
917 spin_unlock_irqrestore(&ap->host_set->lock, flags);
918
919 DPRINTK("flush #1\n");
920 flush_workqueue(ata_wq);
921
922 /*
923 * At this point, if a task is running, it's guaranteed to see
924 * the FLUSH flag; thus, it will never queue pio tasks again.
925 * Cancel and flush.
926 */
927 if (!cancel_delayed_work(&ap->port_task)) {
928 DPRINTK("flush #2\n");
929 flush_workqueue(ata_wq);
930 }
931
932 spin_lock_irqsave(&ap->host_set->lock, flags);
933 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
934 spin_unlock_irqrestore(&ap->host_set->lock, flags);
935
936 DPRINTK("EXIT\n");
937 }
938
939 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
940 {
941 struct completion *waiting = qc->private_data;
942
943 qc->ap->ops->tf_read(qc->ap, &qc->tf);
944 complete(waiting);
945 }
946
947 /**
948 * ata_exec_internal - execute libata internal command
949 * @ap: Port to which the command is sent
950 * @dev: Device to which the command is sent
951 * @tf: Taskfile registers for the command and the result
952 * @dma_dir: Data tranfer direction of the command
953 * @buf: Data buffer of the command
954 * @buflen: Length of data buffer
955 *
956 * Executes libata internal command with timeout. @tf contains
957 * command on entry and result on return. Timeout and error
958 * conditions are reported via return value. No recovery action
959 * is taken after a command times out. It's caller's duty to
960 * clean up after timeout.
961 *
962 * LOCKING:
963 * None. Should be called with kernel context, might sleep.
964 */
965
966 static unsigned
967 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
968 struct ata_taskfile *tf,
969 int dma_dir, void *buf, unsigned int buflen)
970 {
971 u8 command = tf->command;
972 struct ata_queued_cmd *qc;
973 DECLARE_COMPLETION(wait);
974 unsigned long flags;
975 unsigned int err_mask;
976
977 spin_lock_irqsave(&ap->host_set->lock, flags);
978
979 qc = ata_qc_new_init(ap, dev);
980 BUG_ON(qc == NULL);
981
982 qc->tf = *tf;
983 qc->dma_dir = dma_dir;
984 if (dma_dir != DMA_NONE) {
985 ata_sg_init_one(qc, buf, buflen);
986 qc->nsect = buflen / ATA_SECT_SIZE;
987 }
988
989 qc->private_data = &wait;
990 qc->complete_fn = ata_qc_complete_internal;
991
992 qc->err_mask = ata_qc_issue(qc);
993 if (qc->err_mask)
994 ata_qc_complete(qc);
995
996 spin_unlock_irqrestore(&ap->host_set->lock, flags);
997
998 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
999 ata_port_flush_task(ap);
1000
1001 spin_lock_irqsave(&ap->host_set->lock, flags);
1002
1003 /* We're racing with irq here. If we lose, the
1004 * following test prevents us from completing the qc
1005 * again. If completion irq occurs after here but
1006 * before the caller cleans up, it will result in a
1007 * spurious interrupt. We can live with that.
1008 */
1009 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1010 qc->err_mask = AC_ERR_TIMEOUT;
1011 ata_qc_complete(qc);
1012 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1013 ap->id, command);
1014 }
1015
1016 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1017 }
1018
1019 *tf = qc->tf;
1020 err_mask = qc->err_mask;
1021
1022 ata_qc_free(qc);
1023
1024 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1025 * Until those drivers are fixed, we detect the condition
1026 * here, fail the command with AC_ERR_SYSTEM and reenable the
1027 * port.
1028 *
1029 * Note that this doesn't change any behavior as internal
1030 * command failure results in disabling the device in the
1031 * higher layer for LLDDs without new reset/EH callbacks.
1032 *
1033 * Kill the following code as soon as those drivers are fixed.
1034 */
1035 if (ap->flags & ATA_FLAG_PORT_DISABLED) {
1036 err_mask |= AC_ERR_SYSTEM;
1037 ata_port_probe(ap);
1038 }
1039
1040 return err_mask;
1041 }
1042
1043 /**
1044 * ata_pio_need_iordy - check if iordy needed
1045 * @adev: ATA device
1046 *
1047 * Check if the current speed of the device requires IORDY. Used
1048 * by various controllers for chip configuration.
1049 */
1050
1051 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1052 {
1053 int pio;
1054 int speed = adev->pio_mode - XFER_PIO_0;
1055
1056 if (speed < 2)
1057 return 0;
1058 if (speed > 2)
1059 return 1;
1060
1061 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1062
1063 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1064 pio = adev->id[ATA_ID_EIDE_PIO];
1065 /* Is the speed faster than the drive allows non IORDY ? */
1066 if (pio) {
1067 /* This is cycle times not frequency - watch the logic! */
1068 if (pio > 240) /* PIO2 is 240nS per cycle */
1069 return 1;
1070 return 0;
1071 }
1072 }
1073 return 0;
1074 }
1075
1076 /**
1077 * ata_dev_read_id - Read ID data from the specified device
1078 * @ap: port on which target device resides
1079 * @dev: target device
1080 * @p_class: pointer to class of the target device (may be changed)
1081 * @post_reset: is this read ID post-reset?
1082 * @p_id: read IDENTIFY page (newly allocated)
1083 *
1084 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1085 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1086 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1087 * for pre-ATA4 drives.
1088 *
1089 * LOCKING:
1090 * Kernel thread context (may sleep)
1091 *
1092 * RETURNS:
1093 * 0 on success, -errno otherwise.
1094 */
1095 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1096 unsigned int *p_class, int post_reset, u16 **p_id)
1097 {
1098 unsigned int class = *p_class;
1099 struct ata_taskfile tf;
1100 unsigned int err_mask = 0;
1101 u16 *id;
1102 const char *reason;
1103 int rc;
1104
1105 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1106
1107 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1108
1109 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1110 if (id == NULL) {
1111 rc = -ENOMEM;
1112 reason = "out of memory";
1113 goto err_out;
1114 }
1115
1116 retry:
1117 ata_tf_init(ap, &tf, dev->devno);
1118
1119 switch (class) {
1120 case ATA_DEV_ATA:
1121 tf.command = ATA_CMD_ID_ATA;
1122 break;
1123 case ATA_DEV_ATAPI:
1124 tf.command = ATA_CMD_ID_ATAPI;
1125 break;
1126 default:
1127 rc = -ENODEV;
1128 reason = "unsupported class";
1129 goto err_out;
1130 }
1131
1132 tf.protocol = ATA_PROT_PIO;
1133
1134 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1135 id, sizeof(id[0]) * ATA_ID_WORDS);
1136 if (err_mask) {
1137 rc = -EIO;
1138 reason = "I/O error";
1139 goto err_out;
1140 }
1141
1142 swap_buf_le16(id, ATA_ID_WORDS);
1143
1144 /* sanity check */
1145 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
1146 rc = -EINVAL;
1147 reason = "device reports illegal type";
1148 goto err_out;
1149 }
1150
1151 if (post_reset && class == ATA_DEV_ATA) {
1152 /*
1153 * The exact sequence expected by certain pre-ATA4 drives is:
1154 * SRST RESET
1155 * IDENTIFY
1156 * INITIALIZE DEVICE PARAMETERS
1157 * anything else..
1158 * Some drives were very specific about that exact sequence.
1159 */
1160 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1161 err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
1162 if (err_mask) {
1163 rc = -EIO;
1164 reason = "INIT_DEV_PARAMS failed";
1165 goto err_out;
1166 }
1167
1168 /* current CHS translation info (id[53-58]) might be
1169 * changed. reread the identify device info.
1170 */
1171 post_reset = 0;
1172 goto retry;
1173 }
1174 }
1175
1176 *p_class = class;
1177 *p_id = id;
1178 return 0;
1179
1180 err_out:
1181 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1182 ap->id, dev->devno, reason);
1183 kfree(id);
1184 return rc;
1185 }
1186
1187 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1188 struct ata_device *dev)
1189 {
1190 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1191 }
1192
1193 /**
1194 * ata_dev_configure - Configure the specified ATA/ATAPI device
1195 * @ap: Port on which target device resides
1196 * @dev: Target device to configure
1197 * @print_info: Enable device info printout
1198 *
1199 * Configure @dev according to @dev->id. Generic and low-level
1200 * driver specific fixups are also applied.
1201 *
1202 * LOCKING:
1203 * Kernel thread context (may sleep)
1204 *
1205 * RETURNS:
1206 * 0 on success, -errno otherwise
1207 */
1208 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1209 int print_info)
1210 {
1211 const u16 *id = dev->id;
1212 unsigned int xfer_mask;
1213 int i, rc;
1214
1215 if (!ata_dev_present(dev)) {
1216 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1217 ap->id, dev->devno);
1218 return 0;
1219 }
1220
1221 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1222
1223 /* print device capabilities */
1224 if (print_info)
1225 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1226 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1227 ap->id, dev->devno, id[49], id[82], id[83],
1228 id[84], id[85], id[86], id[87], id[88]);
1229
1230 /* initialize to-be-configured parameters */
1231 dev->flags = 0;
1232 dev->max_sectors = 0;
1233 dev->cdb_len = 0;
1234 dev->n_sectors = 0;
1235 dev->cylinders = 0;
1236 dev->heads = 0;
1237 dev->sectors = 0;
1238
1239 /*
1240 * common ATA, ATAPI feature tests
1241 */
1242
1243 /* find max transfer mode; for printk only */
1244 xfer_mask = ata_id_xfermask(id);
1245
1246 ata_dump_id(id);
1247
1248 /* ATA-specific feature tests */
1249 if (dev->class == ATA_DEV_ATA) {
1250 dev->n_sectors = ata_id_n_sectors(id);
1251
1252 if (ata_id_has_lba(id)) {
1253 const char *lba_desc;
1254
1255 lba_desc = "LBA";
1256 dev->flags |= ATA_DFLAG_LBA;
1257 if (ata_id_has_lba48(id)) {
1258 dev->flags |= ATA_DFLAG_LBA48;
1259 lba_desc = "LBA48";
1260 }
1261
1262 /* print device info to dmesg */
1263 if (print_info)
1264 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1265 "max %s, %Lu sectors: %s\n",
1266 ap->id, dev->devno,
1267 ata_id_major_version(id),
1268 ata_mode_string(xfer_mask),
1269 (unsigned long long)dev->n_sectors,
1270 lba_desc);
1271 } else {
1272 /* CHS */
1273
1274 /* Default translation */
1275 dev->cylinders = id[1];
1276 dev->heads = id[3];
1277 dev->sectors = id[6];
1278
1279 if (ata_id_current_chs_valid(id)) {
1280 /* Current CHS translation is valid. */
1281 dev->cylinders = id[54];
1282 dev->heads = id[55];
1283 dev->sectors = id[56];
1284 }
1285
1286 /* print device info to dmesg */
1287 if (print_info)
1288 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1289 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1290 ap->id, dev->devno,
1291 ata_id_major_version(id),
1292 ata_mode_string(xfer_mask),
1293 (unsigned long long)dev->n_sectors,
1294 dev->cylinders, dev->heads, dev->sectors);
1295 }
1296
1297 dev->cdb_len = 16;
1298 }
1299
1300 /* ATAPI-specific feature tests */
1301 else if (dev->class == ATA_DEV_ATAPI) {
1302 rc = atapi_cdb_len(id);
1303 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1304 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1305 rc = -EINVAL;
1306 goto err_out_nosup;
1307 }
1308 dev->cdb_len = (unsigned int) rc;
1309
1310 /* print device info to dmesg */
1311 if (print_info)
1312 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1313 ap->id, dev->devno, ata_mode_string(xfer_mask));
1314 }
1315
1316 ap->host->max_cmd_len = 0;
1317 for (i = 0; i < ATA_MAX_DEVICES; i++)
1318 ap->host->max_cmd_len = max_t(unsigned int,
1319 ap->host->max_cmd_len,
1320 ap->device[i].cdb_len);
1321
1322 /* limit bridge transfers to udma5, 200 sectors */
1323 if (ata_dev_knobble(ap, dev)) {
1324 if (print_info)
1325 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1326 ap->id, dev->devno);
1327 dev->udma_mask &= ATA_UDMA5;
1328 dev->max_sectors = ATA_MAX_SECTORS;
1329 }
1330
1331 if (ap->ops->dev_config)
1332 ap->ops->dev_config(ap, dev);
1333
1334 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1335 return 0;
1336
1337 err_out_nosup:
1338 DPRINTK("EXIT, err\n");
1339 return rc;
1340 }
1341
1342 /**
1343 * ata_bus_probe - Reset and probe ATA bus
1344 * @ap: Bus to probe
1345 *
1346 * Master ATA bus probing function. Initiates a hardware-dependent
1347 * bus reset, then attempts to identify any devices found on
1348 * the bus.
1349 *
1350 * LOCKING:
1351 * PCI/etc. bus probe sem.
1352 *
1353 * RETURNS:
1354 * Zero on success, non-zero on error.
1355 */
1356
1357 static int ata_bus_probe(struct ata_port *ap)
1358 {
1359 unsigned int classes[ATA_MAX_DEVICES];
1360 unsigned int i, rc, found = 0;
1361
1362 ata_port_probe(ap);
1363
1364 /* reset and determine device classes */
1365 for (i = 0; i < ATA_MAX_DEVICES; i++)
1366 classes[i] = ATA_DEV_UNKNOWN;
1367
1368 if (ap->ops->probe_reset) {
1369 rc = ap->ops->probe_reset(ap, classes);
1370 if (rc) {
1371 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
1372 return rc;
1373 }
1374 } else {
1375 ap->ops->phy_reset(ap);
1376
1377 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1378 for (i = 0; i < ATA_MAX_DEVICES; i++)
1379 classes[i] = ap->device[i].class;
1380
1381 ata_port_probe(ap);
1382 }
1383
1384 for (i = 0; i < ATA_MAX_DEVICES; i++)
1385 if (classes[i] == ATA_DEV_UNKNOWN)
1386 classes[i] = ATA_DEV_NONE;
1387
1388 /* read IDENTIFY page and configure devices */
1389 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1390 struct ata_device *dev = &ap->device[i];
1391
1392 dev->class = classes[i];
1393
1394 if (!ata_dev_present(dev))
1395 continue;
1396
1397 WARN_ON(dev->id != NULL);
1398 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1399 dev->class = ATA_DEV_NONE;
1400 continue;
1401 }
1402
1403 if (ata_dev_configure(ap, dev, 1)) {
1404 ata_dev_disable(ap, dev);
1405 continue;
1406 }
1407
1408 found = 1;
1409 }
1410
1411 if (!found)
1412 goto err_out_disable;
1413
1414 if (ap->ops->set_mode)
1415 ap->ops->set_mode(ap);
1416 else
1417 ata_set_mode(ap);
1418
1419 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1420 goto err_out_disable;
1421
1422 return 0;
1423
1424 err_out_disable:
1425 ap->ops->port_disable(ap);
1426 return -1;
1427 }
1428
1429 /**
1430 * ata_port_probe - Mark port as enabled
1431 * @ap: Port for which we indicate enablement
1432 *
1433 * Modify @ap data structure such that the system
1434 * thinks that the entire port is enabled.
1435 *
1436 * LOCKING: host_set lock, or some other form of
1437 * serialization.
1438 */
1439
1440 void ata_port_probe(struct ata_port *ap)
1441 {
1442 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1443 }
1444
1445 /**
1446 * sata_print_link_status - Print SATA link status
1447 * @ap: SATA port to printk link status about
1448 *
1449 * This function prints link speed and status of a SATA link.
1450 *
1451 * LOCKING:
1452 * None.
1453 */
1454 static void sata_print_link_status(struct ata_port *ap)
1455 {
1456 u32 sstatus, tmp;
1457 const char *speed;
1458
1459 if (!ap->ops->scr_read)
1460 return;
1461
1462 sstatus = scr_read(ap, SCR_STATUS);
1463
1464 if (sata_dev_present(ap)) {
1465 tmp = (sstatus >> 4) & 0xf;
1466 if (tmp & (1 << 0))
1467 speed = "1.5";
1468 else if (tmp & (1 << 1))
1469 speed = "3.0";
1470 else
1471 speed = "<unknown>";
1472 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1473 ap->id, speed, sstatus);
1474 } else {
1475 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1476 ap->id, sstatus);
1477 }
1478 }
1479
1480 /**
1481 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1482 * @ap: SATA port associated with target SATA PHY.
1483 *
1484 * This function issues commands to standard SATA Sxxx
1485 * PHY registers, to wake up the phy (and device), and
1486 * clear any reset condition.
1487 *
1488 * LOCKING:
1489 * PCI/etc. bus probe sem.
1490 *
1491 */
1492 void __sata_phy_reset(struct ata_port *ap)
1493 {
1494 u32 sstatus;
1495 unsigned long timeout = jiffies + (HZ * 5);
1496
1497 if (ap->flags & ATA_FLAG_SATA_RESET) {
1498 /* issue phy wake/reset */
1499 scr_write_flush(ap, SCR_CONTROL, 0x301);
1500 /* Couldn't find anything in SATA I/II specs, but
1501 * AHCI-1.1 10.4.2 says at least 1 ms. */
1502 mdelay(1);
1503 }
1504 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1505
1506 /* wait for phy to become ready, if necessary */
1507 do {
1508 msleep(200);
1509 sstatus = scr_read(ap, SCR_STATUS);
1510 if ((sstatus & 0xf) != 1)
1511 break;
1512 } while (time_before(jiffies, timeout));
1513
1514 /* print link status */
1515 sata_print_link_status(ap);
1516
1517 /* TODO: phy layer with polling, timeouts, etc. */
1518 if (sata_dev_present(ap))
1519 ata_port_probe(ap);
1520 else
1521 ata_port_disable(ap);
1522
1523 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1524 return;
1525
1526 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1527 ata_port_disable(ap);
1528 return;
1529 }
1530
1531 ap->cbl = ATA_CBL_SATA;
1532 }
1533
1534 /**
1535 * sata_phy_reset - Reset SATA bus.
1536 * @ap: SATA port associated with target SATA PHY.
1537 *
1538 * This function resets the SATA bus, and then probes
1539 * the bus for devices.
1540 *
1541 * LOCKING:
1542 * PCI/etc. bus probe sem.
1543 *
1544 */
1545 void sata_phy_reset(struct ata_port *ap)
1546 {
1547 __sata_phy_reset(ap);
1548 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1549 return;
1550 ata_bus_reset(ap);
1551 }
1552
1553 /**
1554 * ata_dev_pair - return other device on cable
1555 * @ap: port
1556 * @adev: device
1557 *
1558 * Obtain the other device on the same cable, or if none is
1559 * present NULL is returned
1560 */
1561
1562 struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
1563 {
1564 struct ata_device *pair = &ap->device[1 - adev->devno];
1565 if (!ata_dev_present(pair))
1566 return NULL;
1567 return pair;
1568 }
1569
1570 /**
1571 * ata_port_disable - Disable port.
1572 * @ap: Port to be disabled.
1573 *
1574 * Modify @ap data structure such that the system
1575 * thinks that the entire port is disabled, and should
1576 * never attempt to probe or communicate with devices
1577 * on this port.
1578 *
1579 * LOCKING: host_set lock, or some other form of
1580 * serialization.
1581 */
1582
1583 void ata_port_disable(struct ata_port *ap)
1584 {
1585 ap->device[0].class = ATA_DEV_NONE;
1586 ap->device[1].class = ATA_DEV_NONE;
1587 ap->flags |= ATA_FLAG_PORT_DISABLED;
1588 }
1589
1590 /*
1591 * This mode timing computation functionality is ported over from
1592 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1593 */
1594 /*
1595 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1596 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1597 * for PIO 5, which is a nonstandard extension and UDMA6, which
1598 * is currently supported only by Maxtor drives.
1599 */
1600
1601 static const struct ata_timing ata_timing[] = {
1602
1603 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1604 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1605 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1606 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1607
1608 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1609 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1610 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1611
1612 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1613
1614 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1615 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1616 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1617
1618 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1619 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1620 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1621
1622 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1623 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1624 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1625
1626 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1627 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1628 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1629
1630 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1631
1632 { 0xFF }
1633 };
1634
1635 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1636 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1637
1638 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1639 {
1640 q->setup = EZ(t->setup * 1000, T);
1641 q->act8b = EZ(t->act8b * 1000, T);
1642 q->rec8b = EZ(t->rec8b * 1000, T);
1643 q->cyc8b = EZ(t->cyc8b * 1000, T);
1644 q->active = EZ(t->active * 1000, T);
1645 q->recover = EZ(t->recover * 1000, T);
1646 q->cycle = EZ(t->cycle * 1000, T);
1647 q->udma = EZ(t->udma * 1000, UT);
1648 }
1649
1650 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1651 struct ata_timing *m, unsigned int what)
1652 {
1653 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1654 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1655 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1656 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1657 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1658 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1659 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1660 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1661 }
1662
1663 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1664 {
1665 const struct ata_timing *t;
1666
1667 for (t = ata_timing; t->mode != speed; t++)
1668 if (t->mode == 0xFF)
1669 return NULL;
1670 return t;
1671 }
1672
1673 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1674 struct ata_timing *t, int T, int UT)
1675 {
1676 const struct ata_timing *s;
1677 struct ata_timing p;
1678
1679 /*
1680 * Find the mode.
1681 */
1682
1683 if (!(s = ata_timing_find_mode(speed)))
1684 return -EINVAL;
1685
1686 memcpy(t, s, sizeof(*s));
1687
1688 /*
1689 * If the drive is an EIDE drive, it can tell us it needs extended
1690 * PIO/MW_DMA cycle timing.
1691 */
1692
1693 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1694 memset(&p, 0, sizeof(p));
1695 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1696 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1697 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1698 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1699 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1700 }
1701 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1702 }
1703
1704 /*
1705 * Convert the timing to bus clock counts.
1706 */
1707
1708 ata_timing_quantize(t, t, T, UT);
1709
1710 /*
1711 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1712 * S.M.A.R.T * and some other commands. We have to ensure that the
1713 * DMA cycle timing is slower/equal than the fastest PIO timing.
1714 */
1715
1716 if (speed > XFER_PIO_4) {
1717 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1718 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1719 }
1720
1721 /*
1722 * Lengthen active & recovery time so that cycle time is correct.
1723 */
1724
1725 if (t->act8b + t->rec8b < t->cyc8b) {
1726 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1727 t->rec8b = t->cyc8b - t->act8b;
1728 }
1729
1730 if (t->active + t->recover < t->cycle) {
1731 t->active += (t->cycle - (t->active + t->recover)) / 2;
1732 t->recover = t->cycle - t->active;
1733 }
1734
1735 return 0;
1736 }
1737
1738 static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1739 {
1740 unsigned int err_mask;
1741 int rc;
1742
1743 if (dev->xfer_shift == ATA_SHIFT_PIO)
1744 dev->flags |= ATA_DFLAG_PIO;
1745
1746 err_mask = ata_dev_set_xfermode(ap, dev);
1747 if (err_mask) {
1748 printk(KERN_ERR
1749 "ata%u: failed to set xfermode (err_mask=0x%x)\n",
1750 ap->id, err_mask);
1751 return -EIO;
1752 }
1753
1754 rc = ata_dev_revalidate(ap, dev, 0);
1755 if (rc) {
1756 printk(KERN_ERR
1757 "ata%u: failed to revalidate after set xfermode\n",
1758 ap->id);
1759 return rc;
1760 }
1761
1762 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1763 dev->xfer_shift, (int)dev->xfer_mode);
1764
1765 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1766 ap->id, dev->devno,
1767 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1768 return 0;
1769 }
1770
1771 static int ata_host_set_pio(struct ata_port *ap)
1772 {
1773 int i;
1774
1775 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1776 struct ata_device *dev = &ap->device[i];
1777
1778 if (!ata_dev_present(dev))
1779 continue;
1780
1781 if (!dev->pio_mode) {
1782 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1783 return -1;
1784 }
1785
1786 dev->xfer_mode = dev->pio_mode;
1787 dev->xfer_shift = ATA_SHIFT_PIO;
1788 if (ap->ops->set_piomode)
1789 ap->ops->set_piomode(ap, dev);
1790 }
1791
1792 return 0;
1793 }
1794
1795 static void ata_host_set_dma(struct ata_port *ap)
1796 {
1797 int i;
1798
1799 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1800 struct ata_device *dev = &ap->device[i];
1801
1802 if (!ata_dev_present(dev) || !dev->dma_mode)
1803 continue;
1804
1805 dev->xfer_mode = dev->dma_mode;
1806 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1807 if (ap->ops->set_dmamode)
1808 ap->ops->set_dmamode(ap, dev);
1809 }
1810 }
1811
1812 /**
1813 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1814 * @ap: port on which timings will be programmed
1815 *
1816 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1817 *
1818 * LOCKING:
1819 * PCI/etc. bus probe sem.
1820 */
1821 static void ata_set_mode(struct ata_port *ap)
1822 {
1823 int i, rc, used_dma = 0;
1824
1825 /* step 1: calculate xfer_mask */
1826 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1827 struct ata_device *dev = &ap->device[i];
1828 unsigned int pio_mask, dma_mask;
1829
1830 if (!ata_dev_present(dev))
1831 continue;
1832
1833 ata_dev_xfermask(ap, dev);
1834
1835 /* TODO: let LLDD filter dev->*_mask here */
1836
1837 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
1838 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
1839 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
1840 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
1841
1842 if (dev->dma_mode)
1843 used_dma = 1;
1844 }
1845
1846 /* step 2: always set host PIO timings */
1847 rc = ata_host_set_pio(ap);
1848 if (rc)
1849 goto err_out;
1850
1851 /* step 3: set host DMA timings */
1852 ata_host_set_dma(ap);
1853
1854 /* step 4: update devices' xfer mode */
1855 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1856 struct ata_device *dev = &ap->device[i];
1857
1858 if (!ata_dev_present(dev))
1859 continue;
1860
1861 if (ata_dev_set_mode(ap, dev))
1862 goto err_out;
1863 }
1864
1865 /*
1866 * Record simplex status. If we selected DMA then the other
1867 * host channels are not permitted to do so.
1868 */
1869
1870 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
1871 ap->host_set->simplex_claimed = 1;
1872
1873 /*
1874 * Chip specific finalisation
1875 */
1876 if (ap->ops->post_set_mode)
1877 ap->ops->post_set_mode(ap);
1878
1879 return;
1880
1881 err_out:
1882 ata_port_disable(ap);
1883 }
1884
1885 /**
1886 * ata_tf_to_host - issue ATA taskfile to host controller
1887 * @ap: port to which command is being issued
1888 * @tf: ATA taskfile register set
1889 *
1890 * Issues ATA taskfile register set to ATA host controller,
1891 * with proper synchronization with interrupt handler and
1892 * other threads.
1893 *
1894 * LOCKING:
1895 * spin_lock_irqsave(host_set lock)
1896 */
1897
1898 static inline void ata_tf_to_host(struct ata_port *ap,
1899 const struct ata_taskfile *tf)
1900 {
1901 ap->ops->tf_load(ap, tf);
1902 ap->ops->exec_command(ap, tf);
1903 }
1904
1905 /**
1906 * ata_busy_sleep - sleep until BSY clears, or timeout
1907 * @ap: port containing status register to be polled
1908 * @tmout_pat: impatience timeout
1909 * @tmout: overall timeout
1910 *
1911 * Sleep until ATA Status register bit BSY clears,
1912 * or a timeout occurs.
1913 *
1914 * LOCKING: None.
1915 */
1916
1917 unsigned int ata_busy_sleep (struct ata_port *ap,
1918 unsigned long tmout_pat, unsigned long tmout)
1919 {
1920 unsigned long timer_start, timeout;
1921 u8 status;
1922
1923 status = ata_busy_wait(ap, ATA_BUSY, 300);
1924 timer_start = jiffies;
1925 timeout = timer_start + tmout_pat;
1926 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1927 msleep(50);
1928 status = ata_busy_wait(ap, ATA_BUSY, 3);
1929 }
1930
1931 if (status & ATA_BUSY)
1932 printk(KERN_WARNING "ata%u is slow to respond, "
1933 "please be patient\n", ap->id);
1934
1935 timeout = timer_start + tmout;
1936 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1937 msleep(50);
1938 status = ata_chk_status(ap);
1939 }
1940
1941 if (status & ATA_BUSY) {
1942 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1943 ap->id, tmout / HZ);
1944 return 1;
1945 }
1946
1947 return 0;
1948 }
1949
1950 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1951 {
1952 struct ata_ioports *ioaddr = &ap->ioaddr;
1953 unsigned int dev0 = devmask & (1 << 0);
1954 unsigned int dev1 = devmask & (1 << 1);
1955 unsigned long timeout;
1956
1957 /* if device 0 was found in ata_devchk, wait for its
1958 * BSY bit to clear
1959 */
1960 if (dev0)
1961 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1962
1963 /* if device 1 was found in ata_devchk, wait for
1964 * register access, then wait for BSY to clear
1965 */
1966 timeout = jiffies + ATA_TMOUT_BOOT;
1967 while (dev1) {
1968 u8 nsect, lbal;
1969
1970 ap->ops->dev_select(ap, 1);
1971 if (ap->flags & ATA_FLAG_MMIO) {
1972 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1973 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1974 } else {
1975 nsect = inb(ioaddr->nsect_addr);
1976 lbal = inb(ioaddr->lbal_addr);
1977 }
1978 if ((nsect == 1) && (lbal == 1))
1979 break;
1980 if (time_after(jiffies, timeout)) {
1981 dev1 = 0;
1982 break;
1983 }
1984 msleep(50); /* give drive a breather */
1985 }
1986 if (dev1)
1987 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1988
1989 /* is all this really necessary? */
1990 ap->ops->dev_select(ap, 0);
1991 if (dev1)
1992 ap->ops->dev_select(ap, 1);
1993 if (dev0)
1994 ap->ops->dev_select(ap, 0);
1995 }
1996
1997 static unsigned int ata_bus_softreset(struct ata_port *ap,
1998 unsigned int devmask)
1999 {
2000 struct ata_ioports *ioaddr = &ap->ioaddr;
2001
2002 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2003
2004 /* software reset. causes dev0 to be selected */
2005 if (ap->flags & ATA_FLAG_MMIO) {
2006 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2007 udelay(20); /* FIXME: flush */
2008 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2009 udelay(20); /* FIXME: flush */
2010 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2011 } else {
2012 outb(ap->ctl, ioaddr->ctl_addr);
2013 udelay(10);
2014 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2015 udelay(10);
2016 outb(ap->ctl, ioaddr->ctl_addr);
2017 }
2018
2019 /* spec mandates ">= 2ms" before checking status.
2020 * We wait 150ms, because that was the magic delay used for
2021 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2022 * between when the ATA command register is written, and then
2023 * status is checked. Because waiting for "a while" before
2024 * checking status is fine, post SRST, we perform this magic
2025 * delay here as well.
2026 *
2027 * Old drivers/ide uses the 2mS rule and then waits for ready
2028 */
2029 msleep(150);
2030
2031 /* Before we perform post reset processing we want to see if
2032 * the bus shows 0xFF because the odd clown forgets the D7
2033 * pulldown resistor.
2034 */
2035 if (ata_check_status(ap) == 0xFF)
2036 return AC_ERR_OTHER;
2037
2038 ata_bus_post_reset(ap, devmask);
2039
2040 return 0;
2041 }
2042
2043 /**
2044 * ata_bus_reset - reset host port and associated ATA channel
2045 * @ap: port to reset
2046 *
2047 * This is typically the first time we actually start issuing
2048 * commands to the ATA channel. We wait for BSY to clear, then
2049 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2050 * result. Determine what devices, if any, are on the channel
2051 * by looking at the device 0/1 error register. Look at the signature
2052 * stored in each device's taskfile registers, to determine if
2053 * the device is ATA or ATAPI.
2054 *
2055 * LOCKING:
2056 * PCI/etc. bus probe sem.
2057 * Obtains host_set lock.
2058 *
2059 * SIDE EFFECTS:
2060 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2061 */
2062
2063 void ata_bus_reset(struct ata_port *ap)
2064 {
2065 struct ata_ioports *ioaddr = &ap->ioaddr;
2066 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2067 u8 err;
2068 unsigned int dev0, dev1 = 0, devmask = 0;
2069
2070 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2071
2072 /* determine if device 0/1 are present */
2073 if (ap->flags & ATA_FLAG_SATA_RESET)
2074 dev0 = 1;
2075 else {
2076 dev0 = ata_devchk(ap, 0);
2077 if (slave_possible)
2078 dev1 = ata_devchk(ap, 1);
2079 }
2080
2081 if (dev0)
2082 devmask |= (1 << 0);
2083 if (dev1)
2084 devmask |= (1 << 1);
2085
2086 /* select device 0 again */
2087 ap->ops->dev_select(ap, 0);
2088
2089 /* issue bus reset */
2090 if (ap->flags & ATA_FLAG_SRST)
2091 if (ata_bus_softreset(ap, devmask))
2092 goto err_out;
2093
2094 /*
2095 * determine by signature whether we have ATA or ATAPI devices
2096 */
2097 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2098 if ((slave_possible) && (err != 0x81))
2099 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2100
2101 /* re-enable interrupts */
2102 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2103 ata_irq_on(ap);
2104
2105 /* is double-select really necessary? */
2106 if (ap->device[1].class != ATA_DEV_NONE)
2107 ap->ops->dev_select(ap, 1);
2108 if (ap->device[0].class != ATA_DEV_NONE)
2109 ap->ops->dev_select(ap, 0);
2110
2111 /* if no devices were detected, disable this port */
2112 if ((ap->device[0].class == ATA_DEV_NONE) &&
2113 (ap->device[1].class == ATA_DEV_NONE))
2114 goto err_out;
2115
2116 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2117 /* set up device control for ATA_FLAG_SATA_RESET */
2118 if (ap->flags & ATA_FLAG_MMIO)
2119 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2120 else
2121 outb(ap->ctl, ioaddr->ctl_addr);
2122 }
2123
2124 DPRINTK("EXIT\n");
2125 return;
2126
2127 err_out:
2128 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2129 ap->ops->port_disable(ap);
2130
2131 DPRINTK("EXIT\n");
2132 }
2133
2134 static int sata_phy_resume(struct ata_port *ap)
2135 {
2136 unsigned long timeout = jiffies + (HZ * 5);
2137 u32 sstatus;
2138
2139 scr_write_flush(ap, SCR_CONTROL, 0x300);
2140
2141 /* Wait for phy to become ready, if necessary. */
2142 do {
2143 msleep(200);
2144 sstatus = scr_read(ap, SCR_STATUS);
2145 if ((sstatus & 0xf) != 1)
2146 return 0;
2147 } while (time_before(jiffies, timeout));
2148
2149 return -1;
2150 }
2151
2152 /**
2153 * ata_std_probeinit - initialize probing
2154 * @ap: port to be probed
2155 *
2156 * @ap is about to be probed. Initialize it. This function is
2157 * to be used as standard callback for ata_drive_probe_reset().
2158 *
2159 * NOTE!!! Do not use this function as probeinit if a low level
2160 * driver implements only hardreset. Just pass NULL as probeinit
2161 * in that case. Using this function is probably okay but doing
2162 * so makes reset sequence different from the original
2163 * ->phy_reset implementation and Jeff nervous. :-P
2164 */
2165 void ata_std_probeinit(struct ata_port *ap)
2166 {
2167 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
2168 sata_phy_resume(ap);
2169 if (sata_dev_present(ap))
2170 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2171 }
2172 }
2173
2174 /**
2175 * ata_std_softreset - reset host port via ATA SRST
2176 * @ap: port to reset
2177 * @verbose: fail verbosely
2178 * @classes: resulting classes of attached devices
2179 *
2180 * Reset host port using ATA SRST. This function is to be used
2181 * as standard callback for ata_drive_*_reset() functions.
2182 *
2183 * LOCKING:
2184 * Kernel thread context (may sleep)
2185 *
2186 * RETURNS:
2187 * 0 on success, -errno otherwise.
2188 */
2189 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2190 {
2191 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2192 unsigned int devmask = 0, err_mask;
2193 u8 err;
2194
2195 DPRINTK("ENTER\n");
2196
2197 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2198 classes[0] = ATA_DEV_NONE;
2199 goto out;
2200 }
2201
2202 /* determine if device 0/1 are present */
2203 if (ata_devchk(ap, 0))
2204 devmask |= (1 << 0);
2205 if (slave_possible && ata_devchk(ap, 1))
2206 devmask |= (1 << 1);
2207
2208 /* select device 0 again */
2209 ap->ops->dev_select(ap, 0);
2210
2211 /* issue bus reset */
2212 DPRINTK("about to softreset, devmask=%x\n", devmask);
2213 err_mask = ata_bus_softreset(ap, devmask);
2214 if (err_mask) {
2215 if (verbose)
2216 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2217 ap->id, err_mask);
2218 else
2219 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2220 err_mask);
2221 return -EIO;
2222 }
2223
2224 /* determine by signature whether we have ATA or ATAPI devices */
2225 classes[0] = ata_dev_try_classify(ap, 0, &err);
2226 if (slave_possible && err != 0x81)
2227 classes[1] = ata_dev_try_classify(ap, 1, &err);
2228
2229 out:
2230 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2231 return 0;
2232 }
2233
2234 /**
2235 * sata_std_hardreset - reset host port via SATA phy reset
2236 * @ap: port to reset
2237 * @verbose: fail verbosely
2238 * @class: resulting class of attached device
2239 *
2240 * SATA phy-reset host port using DET bits of SControl register.
2241 * This function is to be used as standard callback for
2242 * ata_drive_*_reset().
2243 *
2244 * LOCKING:
2245 * Kernel thread context (may sleep)
2246 *
2247 * RETURNS:
2248 * 0 on success, -errno otherwise.
2249 */
2250 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2251 {
2252 DPRINTK("ENTER\n");
2253
2254 /* Issue phy wake/reset */
2255 scr_write_flush(ap, SCR_CONTROL, 0x301);
2256
2257 /*
2258 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2259 * 10.4.2 says at least 1 ms.
2260 */
2261 msleep(1);
2262
2263 /* Bring phy back */
2264 sata_phy_resume(ap);
2265
2266 /* TODO: phy layer with polling, timeouts, etc. */
2267 if (!sata_dev_present(ap)) {
2268 *class = ATA_DEV_NONE;
2269 DPRINTK("EXIT, link offline\n");
2270 return 0;
2271 }
2272
2273 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2274 if (verbose)
2275 printk(KERN_ERR "ata%u: COMRESET failed "
2276 "(device not ready)\n", ap->id);
2277 else
2278 DPRINTK("EXIT, device not ready\n");
2279 return -EIO;
2280 }
2281
2282 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2283
2284 *class = ata_dev_try_classify(ap, 0, NULL);
2285
2286 DPRINTK("EXIT, class=%u\n", *class);
2287 return 0;
2288 }
2289
2290 /**
2291 * ata_std_postreset - standard postreset callback
2292 * @ap: the target ata_port
2293 * @classes: classes of attached devices
2294 *
2295 * This function is invoked after a successful reset. Note that
2296 * the device might have been reset more than once using
2297 * different reset methods before postreset is invoked.
2298 *
2299 * This function is to be used as standard callback for
2300 * ata_drive_*_reset().
2301 *
2302 * LOCKING:
2303 * Kernel thread context (may sleep)
2304 */
2305 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2306 {
2307 DPRINTK("ENTER\n");
2308
2309 /* set cable type if it isn't already set */
2310 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2311 ap->cbl = ATA_CBL_SATA;
2312
2313 /* print link status */
2314 if (ap->cbl == ATA_CBL_SATA)
2315 sata_print_link_status(ap);
2316
2317 /* re-enable interrupts */
2318 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2319 ata_irq_on(ap);
2320
2321 /* is double-select really necessary? */
2322 if (classes[0] != ATA_DEV_NONE)
2323 ap->ops->dev_select(ap, 1);
2324 if (classes[1] != ATA_DEV_NONE)
2325 ap->ops->dev_select(ap, 0);
2326
2327 /* bail out if no device is present */
2328 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2329 DPRINTK("EXIT, no device\n");
2330 return;
2331 }
2332
2333 /* set up device control */
2334 if (ap->ioaddr.ctl_addr) {
2335 if (ap->flags & ATA_FLAG_MMIO)
2336 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2337 else
2338 outb(ap->ctl, ap->ioaddr.ctl_addr);
2339 }
2340
2341 DPRINTK("EXIT\n");
2342 }
2343
2344 /**
2345 * ata_std_probe_reset - standard probe reset method
2346 * @ap: prot to perform probe-reset
2347 * @classes: resulting classes of attached devices
2348 *
2349 * The stock off-the-shelf ->probe_reset method.
2350 *
2351 * LOCKING:
2352 * Kernel thread context (may sleep)
2353 *
2354 * RETURNS:
2355 * 0 on success, -errno otherwise.
2356 */
2357 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2358 {
2359 ata_reset_fn_t hardreset;
2360
2361 hardreset = NULL;
2362 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2363 hardreset = sata_std_hardreset;
2364
2365 return ata_drive_probe_reset(ap, ata_std_probeinit,
2366 ata_std_softreset, hardreset,
2367 ata_std_postreset, classes);
2368 }
2369
2370 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2371 ata_postreset_fn_t postreset,
2372 unsigned int *classes)
2373 {
2374 int i, rc;
2375
2376 for (i = 0; i < ATA_MAX_DEVICES; i++)
2377 classes[i] = ATA_DEV_UNKNOWN;
2378
2379 rc = reset(ap, 0, classes);
2380 if (rc)
2381 return rc;
2382
2383 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2384 * is complete and convert all ATA_DEV_UNKNOWN to
2385 * ATA_DEV_NONE.
2386 */
2387 for (i = 0; i < ATA_MAX_DEVICES; i++)
2388 if (classes[i] != ATA_DEV_UNKNOWN)
2389 break;
2390
2391 if (i < ATA_MAX_DEVICES)
2392 for (i = 0; i < ATA_MAX_DEVICES; i++)
2393 if (classes[i] == ATA_DEV_UNKNOWN)
2394 classes[i] = ATA_DEV_NONE;
2395
2396 if (postreset)
2397 postreset(ap, classes);
2398
2399 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2400 }
2401
2402 /**
2403 * ata_drive_probe_reset - Perform probe reset with given methods
2404 * @ap: port to reset
2405 * @probeinit: probeinit method (can be NULL)
2406 * @softreset: softreset method (can be NULL)
2407 * @hardreset: hardreset method (can be NULL)
2408 * @postreset: postreset method (can be NULL)
2409 * @classes: resulting classes of attached devices
2410 *
2411 * Reset the specified port and classify attached devices using
2412 * given methods. This function prefers softreset but tries all
2413 * possible reset sequences to reset and classify devices. This
2414 * function is intended to be used for constructing ->probe_reset
2415 * callback by low level drivers.
2416 *
2417 * Reset methods should follow the following rules.
2418 *
2419 * - Return 0 on sucess, -errno on failure.
2420 * - If classification is supported, fill classes[] with
2421 * recognized class codes.
2422 * - If classification is not supported, leave classes[] alone.
2423 * - If verbose is non-zero, print error message on failure;
2424 * otherwise, shut up.
2425 *
2426 * LOCKING:
2427 * Kernel thread context (may sleep)
2428 *
2429 * RETURNS:
2430 * 0 on success, -EINVAL if no reset method is avaliable, -ENODEV
2431 * if classification fails, and any error code from reset
2432 * methods.
2433 */
2434 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2435 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2436 ata_postreset_fn_t postreset, unsigned int *classes)
2437 {
2438 int rc = -EINVAL;
2439
2440 if (probeinit)
2441 probeinit(ap);
2442
2443 if (softreset) {
2444 rc = do_probe_reset(ap, softreset, postreset, classes);
2445 if (rc == 0)
2446 return 0;
2447 }
2448
2449 if (!hardreset)
2450 return rc;
2451
2452 rc = do_probe_reset(ap, hardreset, postreset, classes);
2453 if (rc == 0 || rc != -ENODEV)
2454 return rc;
2455
2456 if (softreset)
2457 rc = do_probe_reset(ap, softreset, postreset, classes);
2458
2459 return rc;
2460 }
2461
2462 /**
2463 * ata_dev_same_device - Determine whether new ID matches configured device
2464 * @ap: port on which the device to compare against resides
2465 * @dev: device to compare against
2466 * @new_class: class of the new device
2467 * @new_id: IDENTIFY page of the new device
2468 *
2469 * Compare @new_class and @new_id against @dev and determine
2470 * whether @dev is the device indicated by @new_class and
2471 * @new_id.
2472 *
2473 * LOCKING:
2474 * None.
2475 *
2476 * RETURNS:
2477 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2478 */
2479 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2480 unsigned int new_class, const u16 *new_id)
2481 {
2482 const u16 *old_id = dev->id;
2483 unsigned char model[2][41], serial[2][21];
2484 u64 new_n_sectors;
2485
2486 if (dev->class != new_class) {
2487 printk(KERN_INFO
2488 "ata%u: dev %u class mismatch %d != %d\n",
2489 ap->id, dev->devno, dev->class, new_class);
2490 return 0;
2491 }
2492
2493 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2494 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2495 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2496 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2497 new_n_sectors = ata_id_n_sectors(new_id);
2498
2499 if (strcmp(model[0], model[1])) {
2500 printk(KERN_INFO
2501 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2502 ap->id, dev->devno, model[0], model[1]);
2503 return 0;
2504 }
2505
2506 if (strcmp(serial[0], serial[1])) {
2507 printk(KERN_INFO
2508 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2509 ap->id, dev->devno, serial[0], serial[1]);
2510 return 0;
2511 }
2512
2513 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2514 printk(KERN_INFO
2515 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2516 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2517 (unsigned long long)new_n_sectors);
2518 return 0;
2519 }
2520
2521 return 1;
2522 }
2523
2524 /**
2525 * ata_dev_revalidate - Revalidate ATA device
2526 * @ap: port on which the device to revalidate resides
2527 * @dev: device to revalidate
2528 * @post_reset: is this revalidation after reset?
2529 *
2530 * Re-read IDENTIFY page and make sure @dev is still attached to
2531 * the port.
2532 *
2533 * LOCKING:
2534 * Kernel thread context (may sleep)
2535 *
2536 * RETURNS:
2537 * 0 on success, negative errno otherwise
2538 */
2539 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2540 int post_reset)
2541 {
2542 unsigned int class;
2543 u16 *id;
2544 int rc;
2545
2546 if (!ata_dev_present(dev))
2547 return -ENODEV;
2548
2549 class = dev->class;
2550 id = NULL;
2551
2552 /* allocate & read ID data */
2553 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2554 if (rc)
2555 goto fail;
2556
2557 /* is the device still there? */
2558 if (!ata_dev_same_device(ap, dev, class, id)) {
2559 rc = -ENODEV;
2560 goto fail;
2561 }
2562
2563 kfree(dev->id);
2564 dev->id = id;
2565
2566 /* configure device according to the new ID */
2567 return ata_dev_configure(ap, dev, 0);
2568
2569 fail:
2570 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2571 ap->id, dev->devno, rc);
2572 kfree(id);
2573 return rc;
2574 }
2575
2576 static const char * const ata_dma_blacklist [] = {
2577 "WDC AC11000H", NULL,
2578 "WDC AC22100H", NULL,
2579 "WDC AC32500H", NULL,
2580 "WDC AC33100H", NULL,
2581 "WDC AC31600H", NULL,
2582 "WDC AC32100H", "24.09P07",
2583 "WDC AC23200L", "21.10N21",
2584 "Compaq CRD-8241B", NULL,
2585 "CRD-8400B", NULL,
2586 "CRD-8480B", NULL,
2587 "CRD-8482B", NULL,
2588 "CRD-84", NULL,
2589 "SanDisk SDP3B", NULL,
2590 "SanDisk SDP3B-64", NULL,
2591 "SANYO CD-ROM CRD", NULL,
2592 "HITACHI CDR-8", NULL,
2593 "HITACHI CDR-8335", NULL,
2594 "HITACHI CDR-8435", NULL,
2595 "Toshiba CD-ROM XM-6202B", NULL,
2596 "TOSHIBA CD-ROM XM-1702BC", NULL,
2597 "CD-532E-A", NULL,
2598 "E-IDE CD-ROM CR-840", NULL,
2599 "CD-ROM Drive/F5A", NULL,
2600 "WPI CDD-820", NULL,
2601 "SAMSUNG CD-ROM SC-148C", NULL,
2602 "SAMSUNG CD-ROM SC", NULL,
2603 "SanDisk SDP3B-64", NULL,
2604 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2605 "_NEC DV5800A", NULL,
2606 "SAMSUNG CD-ROM SN-124", "N001"
2607 };
2608
2609 static int ata_strim(char *s, size_t len)
2610 {
2611 len = strnlen(s, len);
2612
2613 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2614 while ((len > 0) && (s[len - 1] == ' ')) {
2615 len--;
2616 s[len] = 0;
2617 }
2618 return len;
2619 }
2620
2621 static int ata_dma_blacklisted(const struct ata_device *dev)
2622 {
2623 unsigned char model_num[40];
2624 unsigned char model_rev[16];
2625 unsigned int nlen, rlen;
2626 int i;
2627
2628 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2629 sizeof(model_num));
2630 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2631 sizeof(model_rev));
2632 nlen = ata_strim(model_num, sizeof(model_num));
2633 rlen = ata_strim(model_rev, sizeof(model_rev));
2634
2635 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2636 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2637 if (ata_dma_blacklist[i+1] == NULL)
2638 return 1;
2639 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
2640 return 1;
2641 }
2642 }
2643 return 0;
2644 }
2645
2646 /**
2647 * ata_dev_xfermask - Compute supported xfermask of the given device
2648 * @ap: Port on which the device to compute xfermask for resides
2649 * @dev: Device to compute xfermask for
2650 *
2651 * Compute supported xfermask of @dev and store it in
2652 * dev->*_mask. This function is responsible for applying all
2653 * known limits including host controller limits, device
2654 * blacklist, etc...
2655 *
2656 * FIXME: The current implementation limits all transfer modes to
2657 * the fastest of the lowested device on the port. This is not
2658 * required on most controllers.
2659 *
2660 * LOCKING:
2661 * None.
2662 */
2663 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2664 {
2665 struct ata_host_set *hs = ap->host_set;
2666 unsigned long xfer_mask;
2667 int i;
2668
2669 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2670 ap->udma_mask);
2671
2672 /* FIXME: Use port-wide xfermask for now */
2673 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2674 struct ata_device *d = &ap->device[i];
2675 if (!ata_dev_present(d))
2676 continue;
2677 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2678 d->udma_mask);
2679 xfer_mask &= ata_id_xfermask(d->id);
2680 if (ata_dma_blacklisted(d))
2681 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2682 /* Apply cable rule here. Don't apply it early because when
2683 we handle hot plug the cable type can itself change */
2684 if (ap->cbl == ATA_CBL_PATA40)
2685 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2686 }
2687
2688 if (ata_dma_blacklisted(dev))
2689 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2690 "disabling DMA\n", ap->id, dev->devno);
2691
2692 if (hs->flags & ATA_HOST_SIMPLEX) {
2693 if (hs->simplex_claimed)
2694 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2695 }
2696 if (ap->ops->mode_filter)
2697 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2698
2699 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2700 &dev->udma_mask);
2701 }
2702
2703 /**
2704 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2705 * @ap: Port associated with device @dev
2706 * @dev: Device to which command will be sent
2707 *
2708 * Issue SET FEATURES - XFER MODE command to device @dev
2709 * on port @ap.
2710 *
2711 * LOCKING:
2712 * PCI/etc. bus probe sem.
2713 *
2714 * RETURNS:
2715 * 0 on success, AC_ERR_* mask otherwise.
2716 */
2717
2718 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2719 struct ata_device *dev)
2720 {
2721 struct ata_taskfile tf;
2722 unsigned int err_mask;
2723
2724 /* set up set-features taskfile */
2725 DPRINTK("set features - xfer mode\n");
2726
2727 ata_tf_init(ap, &tf, dev->devno);
2728 tf.command = ATA_CMD_SET_FEATURES;
2729 tf.feature = SETFEATURES_XFER;
2730 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2731 tf.protocol = ATA_PROT_NODATA;
2732 tf.nsect = dev->xfer_mode;
2733
2734 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2735
2736 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2737 return err_mask;
2738 }
2739
2740 /**
2741 * ata_dev_init_params - Issue INIT DEV PARAMS command
2742 * @ap: Port associated with device @dev
2743 * @dev: Device to which command will be sent
2744 *
2745 * LOCKING:
2746 * Kernel thread context (may sleep)
2747 *
2748 * RETURNS:
2749 * 0 on success, AC_ERR_* mask otherwise.
2750 */
2751
2752 static unsigned int ata_dev_init_params(struct ata_port *ap,
2753 struct ata_device *dev,
2754 u16 heads,
2755 u16 sectors)
2756 {
2757 struct ata_taskfile tf;
2758 unsigned int err_mask;
2759
2760 /* Number of sectors per track 1-255. Number of heads 1-16 */
2761 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2762 return AC_ERR_INVALID;
2763
2764 /* set up init dev params taskfile */
2765 DPRINTK("init dev params \n");
2766
2767 ata_tf_init(ap, &tf, dev->devno);
2768 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2769 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2770 tf.protocol = ATA_PROT_NODATA;
2771 tf.nsect = sectors;
2772 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2773
2774 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2775
2776 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2777 return err_mask;
2778 }
2779
2780 /**
2781 * ata_sg_clean - Unmap DMA memory associated with command
2782 * @qc: Command containing DMA memory to be released
2783 *
2784 * Unmap all mapped DMA memory associated with this command.
2785 *
2786 * LOCKING:
2787 * spin_lock_irqsave(host_set lock)
2788 */
2789
2790 static void ata_sg_clean(struct ata_queued_cmd *qc)
2791 {
2792 struct ata_port *ap = qc->ap;
2793 struct scatterlist *sg = qc->__sg;
2794 int dir = qc->dma_dir;
2795 void *pad_buf = NULL;
2796
2797 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2798 WARN_ON(sg == NULL);
2799
2800 if (qc->flags & ATA_QCFLAG_SINGLE)
2801 WARN_ON(qc->n_elem > 1);
2802
2803 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2804
2805 /* if we padded the buffer out to 32-bit bound, and data
2806 * xfer direction is from-device, we must copy from the
2807 * pad buffer back into the supplied buffer
2808 */
2809 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2810 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2811
2812 if (qc->flags & ATA_QCFLAG_SG) {
2813 if (qc->n_elem)
2814 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2815 /* restore last sg */
2816 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2817 if (pad_buf) {
2818 struct scatterlist *psg = &qc->pad_sgent;
2819 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2820 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2821 kunmap_atomic(addr, KM_IRQ0);
2822 }
2823 } else {
2824 if (qc->n_elem)
2825 dma_unmap_single(ap->dev,
2826 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2827 dir);
2828 /* restore sg */
2829 sg->length += qc->pad_len;
2830 if (pad_buf)
2831 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2832 pad_buf, qc->pad_len);
2833 }
2834
2835 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2836 qc->__sg = NULL;
2837 }
2838
2839 /**
2840 * ata_fill_sg - Fill PCI IDE PRD table
2841 * @qc: Metadata associated with taskfile to be transferred
2842 *
2843 * Fill PCI IDE PRD (scatter-gather) table with segments
2844 * associated with the current disk command.
2845 *
2846 * LOCKING:
2847 * spin_lock_irqsave(host_set lock)
2848 *
2849 */
2850 static void ata_fill_sg(struct ata_queued_cmd *qc)
2851 {
2852 struct ata_port *ap = qc->ap;
2853 struct scatterlist *sg;
2854 unsigned int idx;
2855
2856 WARN_ON(qc->__sg == NULL);
2857 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2858
2859 idx = 0;
2860 ata_for_each_sg(sg, qc) {
2861 u32 addr, offset;
2862 u32 sg_len, len;
2863
2864 /* determine if physical DMA addr spans 64K boundary.
2865 * Note h/w doesn't support 64-bit, so we unconditionally
2866 * truncate dma_addr_t to u32.
2867 */
2868 addr = (u32) sg_dma_address(sg);
2869 sg_len = sg_dma_len(sg);
2870
2871 while (sg_len) {
2872 offset = addr & 0xffff;
2873 len = sg_len;
2874 if ((offset + sg_len) > 0x10000)
2875 len = 0x10000 - offset;
2876
2877 ap->prd[idx].addr = cpu_to_le32(addr);
2878 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2879 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2880
2881 idx++;
2882 sg_len -= len;
2883 addr += len;
2884 }
2885 }
2886
2887 if (idx)
2888 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2889 }
2890 /**
2891 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2892 * @qc: Metadata associated with taskfile to check
2893 *
2894 * Allow low-level driver to filter ATA PACKET commands, returning
2895 * a status indicating whether or not it is OK to use DMA for the
2896 * supplied PACKET command.
2897 *
2898 * LOCKING:
2899 * spin_lock_irqsave(host_set lock)
2900 *
2901 * RETURNS: 0 when ATAPI DMA can be used
2902 * nonzero otherwise
2903 */
2904 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2905 {
2906 struct ata_port *ap = qc->ap;
2907 int rc = 0; /* Assume ATAPI DMA is OK by default */
2908
2909 if (ap->ops->check_atapi_dma)
2910 rc = ap->ops->check_atapi_dma(qc);
2911
2912 return rc;
2913 }
2914 /**
2915 * ata_qc_prep - Prepare taskfile for submission
2916 * @qc: Metadata associated with taskfile to be prepared
2917 *
2918 * Prepare ATA taskfile for submission.
2919 *
2920 * LOCKING:
2921 * spin_lock_irqsave(host_set lock)
2922 */
2923 void ata_qc_prep(struct ata_queued_cmd *qc)
2924 {
2925 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2926 return;
2927
2928 ata_fill_sg(qc);
2929 }
2930
2931 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2932
2933 /**
2934 * ata_sg_init_one - Associate command with memory buffer
2935 * @qc: Command to be associated
2936 * @buf: Memory buffer
2937 * @buflen: Length of memory buffer, in bytes.
2938 *
2939 * Initialize the data-related elements of queued_cmd @qc
2940 * to point to a single memory buffer, @buf of byte length @buflen.
2941 *
2942 * LOCKING:
2943 * spin_lock_irqsave(host_set lock)
2944 */
2945
2946 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2947 {
2948 struct scatterlist *sg;
2949
2950 qc->flags |= ATA_QCFLAG_SINGLE;
2951
2952 memset(&qc->sgent, 0, sizeof(qc->sgent));
2953 qc->__sg = &qc->sgent;
2954 qc->n_elem = 1;
2955 qc->orig_n_elem = 1;
2956 qc->buf_virt = buf;
2957
2958 sg = qc->__sg;
2959 sg_init_one(sg, buf, buflen);
2960 }
2961
2962 /**
2963 * ata_sg_init - Associate command with scatter-gather table.
2964 * @qc: Command to be associated
2965 * @sg: Scatter-gather table.
2966 * @n_elem: Number of elements in s/g table.
2967 *
2968 * Initialize the data-related elements of queued_cmd @qc
2969 * to point to a scatter-gather table @sg, containing @n_elem
2970 * elements.
2971 *
2972 * LOCKING:
2973 * spin_lock_irqsave(host_set lock)
2974 */
2975
2976 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2977 unsigned int n_elem)
2978 {
2979 qc->flags |= ATA_QCFLAG_SG;
2980 qc->__sg = sg;
2981 qc->n_elem = n_elem;
2982 qc->orig_n_elem = n_elem;
2983 }
2984
2985 /**
2986 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2987 * @qc: Command with memory buffer to be mapped.
2988 *
2989 * DMA-map the memory buffer associated with queued_cmd @qc.
2990 *
2991 * LOCKING:
2992 * spin_lock_irqsave(host_set lock)
2993 *
2994 * RETURNS:
2995 * Zero on success, negative on error.
2996 */
2997
2998 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2999 {
3000 struct ata_port *ap = qc->ap;
3001 int dir = qc->dma_dir;
3002 struct scatterlist *sg = qc->__sg;
3003 dma_addr_t dma_address;
3004 int trim_sg = 0;
3005
3006 /* we must lengthen transfers to end on a 32-bit boundary */
3007 qc->pad_len = sg->length & 3;
3008 if (qc->pad_len) {
3009 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3010 struct scatterlist *psg = &qc->pad_sgent;
3011
3012 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3013
3014 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3015
3016 if (qc->tf.flags & ATA_TFLAG_WRITE)
3017 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3018 qc->pad_len);
3019
3020 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3021 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3022 /* trim sg */
3023 sg->length -= qc->pad_len;
3024 if (sg->length == 0)
3025 trim_sg = 1;
3026
3027 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3028 sg->length, qc->pad_len);
3029 }
3030
3031 if (trim_sg) {
3032 qc->n_elem--;
3033 goto skip_map;
3034 }
3035
3036 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3037 sg->length, dir);
3038 if (dma_mapping_error(dma_address)) {
3039 /* restore sg */
3040 sg->length += qc->pad_len;
3041 return -1;
3042 }
3043
3044 sg_dma_address(sg) = dma_address;
3045 sg_dma_len(sg) = sg->length;
3046
3047 skip_map:
3048 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3049 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3050
3051 return 0;
3052 }
3053
3054 /**
3055 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3056 * @qc: Command with scatter-gather table to be mapped.
3057 *
3058 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3059 *
3060 * LOCKING:
3061 * spin_lock_irqsave(host_set lock)
3062 *
3063 * RETURNS:
3064 * Zero on success, negative on error.
3065 *
3066 */
3067
3068 static int ata_sg_setup(struct ata_queued_cmd *qc)
3069 {
3070 struct ata_port *ap = qc->ap;
3071 struct scatterlist *sg = qc->__sg;
3072 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3073 int n_elem, pre_n_elem, dir, trim_sg = 0;
3074
3075 VPRINTK("ENTER, ata%u\n", ap->id);
3076 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3077
3078 /* we must lengthen transfers to end on a 32-bit boundary */
3079 qc->pad_len = lsg->length & 3;
3080 if (qc->pad_len) {
3081 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3082 struct scatterlist *psg = &qc->pad_sgent;
3083 unsigned int offset;
3084
3085 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3086
3087 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3088
3089 /*
3090 * psg->page/offset are used to copy to-be-written
3091 * data in this function or read data in ata_sg_clean.
3092 */
3093 offset = lsg->offset + lsg->length - qc->pad_len;
3094 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3095 psg->offset = offset_in_page(offset);
3096
3097 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3098 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3099 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3100 kunmap_atomic(addr, KM_IRQ0);
3101 }
3102
3103 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3104 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3105 /* trim last sg */
3106 lsg->length -= qc->pad_len;
3107 if (lsg->length == 0)
3108 trim_sg = 1;
3109
3110 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3111 qc->n_elem - 1, lsg->length, qc->pad_len);
3112 }
3113
3114 pre_n_elem = qc->n_elem;
3115 if (trim_sg && pre_n_elem)
3116 pre_n_elem--;
3117
3118 if (!pre_n_elem) {
3119 n_elem = 0;
3120 goto skip_map;
3121 }
3122
3123 dir = qc->dma_dir;
3124 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3125 if (n_elem < 1) {
3126 /* restore last sg */
3127 lsg->length += qc->pad_len;
3128 return -1;
3129 }
3130
3131 DPRINTK("%d sg elements mapped\n", n_elem);
3132
3133 skip_map:
3134 qc->n_elem = n_elem;
3135
3136 return 0;
3137 }
3138
3139 /**
3140 * ata_poll_qc_complete - turn irq back on and finish qc
3141 * @qc: Command to complete
3142 * @err_mask: ATA status register content
3143 *
3144 * LOCKING:
3145 * None. (grabs host lock)
3146 */
3147
3148 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3149 {
3150 struct ata_port *ap = qc->ap;
3151 unsigned long flags;
3152
3153 spin_lock_irqsave(&ap->host_set->lock, flags);
3154 ap->flags &= ~ATA_FLAG_NOINTR;
3155 ata_irq_on(ap);
3156 ata_qc_complete(qc);
3157 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3158 }
3159
3160 /**
3161 * ata_pio_poll - poll using PIO, depending on current state
3162 * @ap: the target ata_port
3163 *
3164 * LOCKING:
3165 * None. (executing in kernel thread context)
3166 *
3167 * RETURNS:
3168 * timeout value to use
3169 */
3170
3171 static unsigned long ata_pio_poll(struct ata_port *ap)
3172 {
3173 struct ata_queued_cmd *qc;
3174 u8 status;
3175 unsigned int poll_state = HSM_ST_UNKNOWN;
3176 unsigned int reg_state = HSM_ST_UNKNOWN;
3177
3178 qc = ata_qc_from_tag(ap, ap->active_tag);
3179 WARN_ON(qc == NULL);
3180
3181 switch (ap->hsm_task_state) {
3182 case HSM_ST:
3183 case HSM_ST_POLL:
3184 poll_state = HSM_ST_POLL;
3185 reg_state = HSM_ST;
3186 break;
3187 case HSM_ST_LAST:
3188 case HSM_ST_LAST_POLL:
3189 poll_state = HSM_ST_LAST_POLL;
3190 reg_state = HSM_ST_LAST;
3191 break;
3192 default:
3193 BUG();
3194 break;
3195 }
3196
3197 status = ata_chk_status(ap);
3198 if (status & ATA_BUSY) {
3199 if (time_after(jiffies, ap->pio_task_timeout)) {
3200 qc->err_mask |= AC_ERR_TIMEOUT;
3201 ap->hsm_task_state = HSM_ST_TMOUT;
3202 return 0;
3203 }
3204 ap->hsm_task_state = poll_state;
3205 return ATA_SHORT_PAUSE;
3206 }
3207
3208 ap->hsm_task_state = reg_state;
3209 return 0;
3210 }
3211
3212 /**
3213 * ata_pio_complete - check if drive is busy or idle
3214 * @ap: the target ata_port
3215 *
3216 * LOCKING:
3217 * None. (executing in kernel thread context)
3218 *
3219 * RETURNS:
3220 * Non-zero if qc completed, zero otherwise.
3221 */
3222
3223 static int ata_pio_complete (struct ata_port *ap)
3224 {
3225 struct ata_queued_cmd *qc;
3226 u8 drv_stat;
3227
3228 /*
3229 * This is purely heuristic. This is a fast path. Sometimes when
3230 * we enter, BSY will be cleared in a chk-status or two. If not,
3231 * the drive is probably seeking or something. Snooze for a couple
3232 * msecs, then chk-status again. If still busy, fall back to
3233 * HSM_ST_POLL state.
3234 */
3235 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3236 if (drv_stat & ATA_BUSY) {
3237 msleep(2);
3238 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3239 if (drv_stat & ATA_BUSY) {
3240 ap->hsm_task_state = HSM_ST_LAST_POLL;
3241 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3242 return 0;
3243 }
3244 }
3245
3246 qc = ata_qc_from_tag(ap, ap->active_tag);
3247 WARN_ON(qc == NULL);
3248
3249 drv_stat = ata_wait_idle(ap);
3250 if (!ata_ok(drv_stat)) {
3251 qc->err_mask |= __ac_err_mask(drv_stat);
3252 ap->hsm_task_state = HSM_ST_ERR;
3253 return 0;
3254 }
3255
3256 ap->hsm_task_state = HSM_ST_IDLE;
3257
3258 WARN_ON(qc->err_mask);
3259 ata_poll_qc_complete(qc);
3260
3261 /* another command may start at this point */
3262
3263 return 1;
3264 }
3265
3266
3267 /**
3268 * swap_buf_le16 - swap halves of 16-bit words in place
3269 * @buf: Buffer to swap
3270 * @buf_words: Number of 16-bit words in buffer.
3271 *
3272 * Swap halves of 16-bit words if needed to convert from
3273 * little-endian byte order to native cpu byte order, or
3274 * vice-versa.
3275 *
3276 * LOCKING:
3277 * Inherited from caller.
3278 */
3279 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3280 {
3281 #ifdef __BIG_ENDIAN
3282 unsigned int i;
3283
3284 for (i = 0; i < buf_words; i++)
3285 buf[i] = le16_to_cpu(buf[i]);
3286 #endif /* __BIG_ENDIAN */
3287 }
3288
3289 /**
3290 * ata_mmio_data_xfer - Transfer data by MMIO
3291 * @ap: port to read/write
3292 * @buf: data buffer
3293 * @buflen: buffer length
3294 * @write_data: read/write
3295 *
3296 * Transfer data from/to the device data register by MMIO.
3297 *
3298 * LOCKING:
3299 * Inherited from caller.
3300 */
3301
3302 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3303 unsigned int buflen, int write_data)
3304 {
3305 unsigned int i;
3306 unsigned int words = buflen >> 1;
3307 u16 *buf16 = (u16 *) buf;
3308 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3309
3310 /* Transfer multiple of 2 bytes */
3311 if (write_data) {
3312 for (i = 0; i < words; i++)
3313 writew(le16_to_cpu(buf16[i]), mmio);
3314 } else {
3315 for (i = 0; i < words; i++)
3316 buf16[i] = cpu_to_le16(readw(mmio));
3317 }
3318
3319 /* Transfer trailing 1 byte, if any. */
3320 if (unlikely(buflen & 0x01)) {
3321 u16 align_buf[1] = { 0 };
3322 unsigned char *trailing_buf = buf + buflen - 1;
3323
3324 if (write_data) {
3325 memcpy(align_buf, trailing_buf, 1);
3326 writew(le16_to_cpu(align_buf[0]), mmio);
3327 } else {
3328 align_buf[0] = cpu_to_le16(readw(mmio));
3329 memcpy(trailing_buf, align_buf, 1);
3330 }
3331 }
3332 }
3333
3334 /**
3335 * ata_pio_data_xfer - Transfer data by PIO
3336 * @ap: port to read/write
3337 * @buf: data buffer
3338 * @buflen: buffer length
3339 * @write_data: read/write
3340 *
3341 * Transfer data from/to the device data register by PIO.
3342 *
3343 * LOCKING:
3344 * Inherited from caller.
3345 */
3346
3347 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3348 unsigned int buflen, int write_data)
3349 {
3350 unsigned int words = buflen >> 1;
3351
3352 /* Transfer multiple of 2 bytes */
3353 if (write_data)
3354 outsw(ap->ioaddr.data_addr, buf, words);
3355 else
3356 insw(ap->ioaddr.data_addr, buf, words);
3357
3358 /* Transfer trailing 1 byte, if any. */
3359 if (unlikely(buflen & 0x01)) {
3360 u16 align_buf[1] = { 0 };
3361 unsigned char *trailing_buf = buf + buflen - 1;
3362
3363 if (write_data) {
3364 memcpy(align_buf, trailing_buf, 1);
3365 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3366 } else {
3367 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3368 memcpy(trailing_buf, align_buf, 1);
3369 }
3370 }
3371 }
3372
3373 /**
3374 * ata_data_xfer - Transfer data from/to the data register.
3375 * @ap: port to read/write
3376 * @buf: data buffer
3377 * @buflen: buffer length
3378 * @do_write: read/write
3379 *
3380 * Transfer data from/to the device data register.
3381 *
3382 * LOCKING:
3383 * Inherited from caller.
3384 */
3385
3386 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3387 unsigned int buflen, int do_write)
3388 {
3389 /* Make the crap hardware pay the costs not the good stuff */
3390 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3391 unsigned long flags;
3392 local_irq_save(flags);
3393 if (ap->flags & ATA_FLAG_MMIO)
3394 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3395 else
3396 ata_pio_data_xfer(ap, buf, buflen, do_write);
3397 local_irq_restore(flags);
3398 } else {
3399 if (ap->flags & ATA_FLAG_MMIO)
3400 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3401 else
3402 ata_pio_data_xfer(ap, buf, buflen, do_write);
3403 }
3404 }
3405
3406 /**
3407 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3408 * @qc: Command on going
3409 *
3410 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3411 *
3412 * LOCKING:
3413 * Inherited from caller.
3414 */
3415
3416 static void ata_pio_sector(struct ata_queued_cmd *qc)
3417 {
3418 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3419 struct scatterlist *sg = qc->__sg;
3420 struct ata_port *ap = qc->ap;
3421 struct page *page;
3422 unsigned int offset;
3423 unsigned char *buf;
3424
3425 if (qc->cursect == (qc->nsect - 1))
3426 ap->hsm_task_state = HSM_ST_LAST;
3427
3428 page = sg[qc->cursg].page;
3429 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3430
3431 /* get the current page and offset */
3432 page = nth_page(page, (offset >> PAGE_SHIFT));
3433 offset %= PAGE_SIZE;
3434
3435 buf = kmap(page) + offset;
3436
3437 qc->cursect++;
3438 qc->cursg_ofs++;
3439
3440 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3441 qc->cursg++;
3442 qc->cursg_ofs = 0;
3443 }
3444
3445 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3446
3447 /* do the actual data transfer */
3448 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3449 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3450
3451 kunmap(page);
3452 }
3453
3454 /**
3455 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3456 * @qc: Command on going
3457 * @bytes: number of bytes
3458 *
3459 * Transfer Transfer data from/to the ATAPI device.
3460 *
3461 * LOCKING:
3462 * Inherited from caller.
3463 *
3464 */
3465
3466 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3467 {
3468 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3469 struct scatterlist *sg = qc->__sg;
3470 struct ata_port *ap = qc->ap;
3471 struct page *page;
3472 unsigned char *buf;
3473 unsigned int offset, count;
3474
3475 if (qc->curbytes + bytes >= qc->nbytes)
3476 ap->hsm_task_state = HSM_ST_LAST;
3477
3478 next_sg:
3479 if (unlikely(qc->cursg >= qc->n_elem)) {
3480 /*
3481 * The end of qc->sg is reached and the device expects
3482 * more data to transfer. In order not to overrun qc->sg
3483 * and fulfill length specified in the byte count register,
3484 * - for read case, discard trailing data from the device
3485 * - for write case, padding zero data to the device
3486 */
3487 u16 pad_buf[1] = { 0 };
3488 unsigned int words = bytes >> 1;
3489 unsigned int i;
3490
3491 if (words) /* warning if bytes > 1 */
3492 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3493 ap->id, bytes);
3494
3495 for (i = 0; i < words; i++)
3496 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3497
3498 ap->hsm_task_state = HSM_ST_LAST;
3499 return;
3500 }
3501
3502 sg = &qc->__sg[qc->cursg];
3503
3504 page = sg->page;
3505 offset = sg->offset + qc->cursg_ofs;
3506
3507 /* get the current page and offset */
3508 page = nth_page(page, (offset >> PAGE_SHIFT));
3509 offset %= PAGE_SIZE;
3510
3511 /* don't overrun current sg */
3512 count = min(sg->length - qc->cursg_ofs, bytes);
3513
3514 /* don't cross page boundaries */
3515 count = min(count, (unsigned int)PAGE_SIZE - offset);
3516
3517 buf = kmap(page) + offset;
3518
3519 bytes -= count;
3520 qc->curbytes += count;
3521 qc->cursg_ofs += count;
3522
3523 if (qc->cursg_ofs == sg->length) {
3524 qc->cursg++;
3525 qc->cursg_ofs = 0;
3526 }
3527
3528 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3529
3530 /* do the actual data transfer */
3531 ata_data_xfer(ap, buf, count, do_write);
3532
3533 kunmap(page);
3534
3535 if (bytes)
3536 goto next_sg;
3537 }
3538
3539 /**
3540 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3541 * @qc: Command on going
3542 *
3543 * Transfer Transfer data from/to the ATAPI device.
3544 *
3545 * LOCKING:
3546 * Inherited from caller.
3547 */
3548
3549 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3550 {
3551 struct ata_port *ap = qc->ap;
3552 struct ata_device *dev = qc->dev;
3553 unsigned int ireason, bc_lo, bc_hi, bytes;
3554 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3555
3556 ap->ops->tf_read(ap, &qc->tf);
3557 ireason = qc->tf.nsect;
3558 bc_lo = qc->tf.lbam;
3559 bc_hi = qc->tf.lbah;
3560 bytes = (bc_hi << 8) | bc_lo;
3561
3562 /* shall be cleared to zero, indicating xfer of data */
3563 if (ireason & (1 << 0))
3564 goto err_out;
3565
3566 /* make sure transfer direction matches expected */
3567 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3568 if (do_write != i_write)
3569 goto err_out;
3570
3571 __atapi_pio_bytes(qc, bytes);
3572
3573 return;
3574
3575 err_out:
3576 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3577 ap->id, dev->devno);
3578 qc->err_mask |= AC_ERR_HSM;
3579 ap->hsm_task_state = HSM_ST_ERR;
3580 }
3581
3582 /**
3583 * ata_pio_block - start PIO on a block
3584 * @ap: the target ata_port
3585 *
3586 * LOCKING:
3587 * None. (executing in kernel thread context)
3588 */
3589
3590 static void ata_pio_block(struct ata_port *ap)
3591 {
3592 struct ata_queued_cmd *qc;
3593 u8 status;
3594
3595 /*
3596 * This is purely heuristic. This is a fast path.
3597 * Sometimes when we enter, BSY will be cleared in
3598 * a chk-status or two. If not, the drive is probably seeking
3599 * or something. Snooze for a couple msecs, then
3600 * chk-status again. If still busy, fall back to
3601 * HSM_ST_POLL state.
3602 */
3603 status = ata_busy_wait(ap, ATA_BUSY, 5);
3604 if (status & ATA_BUSY) {
3605 msleep(2);
3606 status = ata_busy_wait(ap, ATA_BUSY, 10);
3607 if (status & ATA_BUSY) {
3608 ap->hsm_task_state = HSM_ST_POLL;
3609 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3610 return;
3611 }
3612 }
3613
3614 qc = ata_qc_from_tag(ap, ap->active_tag);
3615 WARN_ON(qc == NULL);
3616
3617 /* check error */
3618 if (status & (ATA_ERR | ATA_DF)) {
3619 qc->err_mask |= AC_ERR_DEV;
3620 ap->hsm_task_state = HSM_ST_ERR;
3621 return;
3622 }
3623
3624 /* transfer data if any */
3625 if (is_atapi_taskfile(&qc->tf)) {
3626 /* DRQ=0 means no more data to transfer */
3627 if ((status & ATA_DRQ) == 0) {
3628 ap->hsm_task_state = HSM_ST_LAST;
3629 return;
3630 }
3631
3632 atapi_pio_bytes(qc);
3633 } else {
3634 /* handle BSY=0, DRQ=0 as error */
3635 if ((status & ATA_DRQ) == 0) {
3636 qc->err_mask |= AC_ERR_HSM;
3637 ap->hsm_task_state = HSM_ST_ERR;
3638 return;
3639 }
3640
3641 ata_pio_sector(qc);
3642 }
3643 }
3644
3645 static void ata_pio_error(struct ata_port *ap)
3646 {
3647 struct ata_queued_cmd *qc;
3648
3649 qc = ata_qc_from_tag(ap, ap->active_tag);
3650 WARN_ON(qc == NULL);
3651
3652 if (qc->tf.command != ATA_CMD_PACKET)
3653 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3654
3655 /* make sure qc->err_mask is available to
3656 * know what's wrong and recover
3657 */
3658 WARN_ON(qc->err_mask == 0);
3659
3660 ap->hsm_task_state = HSM_ST_IDLE;
3661
3662 ata_poll_qc_complete(qc);
3663 }
3664
3665 static void ata_pio_task(void *_data)
3666 {
3667 struct ata_port *ap = _data;
3668 unsigned long timeout;
3669 int qc_completed;
3670
3671 fsm_start:
3672 timeout = 0;
3673 qc_completed = 0;
3674
3675 switch (ap->hsm_task_state) {
3676 case HSM_ST_IDLE:
3677 return;
3678
3679 case HSM_ST:
3680 ata_pio_block(ap);
3681 break;
3682
3683 case HSM_ST_LAST:
3684 qc_completed = ata_pio_complete(ap);
3685 break;
3686
3687 case HSM_ST_POLL:
3688 case HSM_ST_LAST_POLL:
3689 timeout = ata_pio_poll(ap);
3690 break;
3691
3692 case HSM_ST_TMOUT:
3693 case HSM_ST_ERR:
3694 ata_pio_error(ap);
3695 return;
3696 }
3697
3698 if (timeout)
3699 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3700 else if (!qc_completed)
3701 goto fsm_start;
3702 }
3703
3704 /**
3705 * atapi_packet_task - Write CDB bytes to hardware
3706 * @_data: Port to which ATAPI device is attached.
3707 *
3708 * When device has indicated its readiness to accept
3709 * a CDB, this function is called. Send the CDB.
3710 * If DMA is to be performed, exit immediately.
3711 * Otherwise, we are in polling mode, so poll
3712 * status under operation succeeds or fails.
3713 *
3714 * LOCKING:
3715 * Kernel thread context (may sleep)
3716 */
3717
3718 static void atapi_packet_task(void *_data)
3719 {
3720 struct ata_port *ap = _data;
3721 struct ata_queued_cmd *qc;
3722 u8 status;
3723
3724 qc = ata_qc_from_tag(ap, ap->active_tag);
3725 WARN_ON(qc == NULL);
3726 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3727
3728 /* sleep-wait for BSY to clear */
3729 DPRINTK("busy wait\n");
3730 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3731 qc->err_mask |= AC_ERR_TIMEOUT;
3732 goto err_out;
3733 }
3734
3735 /* make sure DRQ is set */
3736 status = ata_chk_status(ap);
3737 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3738 qc->err_mask |= AC_ERR_HSM;
3739 goto err_out;
3740 }
3741
3742 /* send SCSI cdb */
3743 DPRINTK("send cdb\n");
3744 WARN_ON(qc->dev->cdb_len < 12);
3745
3746 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3747 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3748 unsigned long flags;
3749
3750 /* Once we're done issuing command and kicking bmdma,
3751 * irq handler takes over. To not lose irq, we need
3752 * to clear NOINTR flag before sending cdb, but
3753 * interrupt handler shouldn't be invoked before we're
3754 * finished. Hence, the following locking.
3755 */
3756 spin_lock_irqsave(&ap->host_set->lock, flags);
3757 ap->flags &= ~ATA_FLAG_NOINTR;
3758 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3759 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3760 ap->ops->bmdma_start(qc); /* initiate bmdma */
3761 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3762 } else {
3763 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3764
3765 /* PIO commands are handled by polling */
3766 ap->hsm_task_state = HSM_ST;
3767 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3768 }
3769
3770 return;
3771
3772 err_out:
3773 ata_poll_qc_complete(qc);
3774 }
3775
3776 /**
3777 * ata_qc_timeout - Handle timeout of queued command
3778 * @qc: Command that timed out
3779 *
3780 * Some part of the kernel (currently, only the SCSI layer)
3781 * has noticed that the active command on port @ap has not
3782 * completed after a specified length of time. Handle this
3783 * condition by disabling DMA (if necessary) and completing
3784 * transactions, with error if necessary.
3785 *
3786 * This also handles the case of the "lost interrupt", where
3787 * for some reason (possibly hardware bug, possibly driver bug)
3788 * an interrupt was not delivered to the driver, even though the
3789 * transaction completed successfully.
3790 *
3791 * LOCKING:
3792 * Inherited from SCSI layer (none, can sleep)
3793 */
3794
3795 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3796 {
3797 struct ata_port *ap = qc->ap;
3798 struct ata_host_set *host_set = ap->host_set;
3799 u8 host_stat = 0, drv_stat;
3800 unsigned long flags;
3801
3802 DPRINTK("ENTER\n");
3803
3804 ap->hsm_task_state = HSM_ST_IDLE;
3805
3806 spin_lock_irqsave(&host_set->lock, flags);
3807
3808 switch (qc->tf.protocol) {
3809
3810 case ATA_PROT_DMA:
3811 case ATA_PROT_ATAPI_DMA:
3812 host_stat = ap->ops->bmdma_status(ap);
3813
3814 /* before we do anything else, clear DMA-Start bit */
3815 ap->ops->bmdma_stop(qc);
3816
3817 /* fall through */
3818
3819 default:
3820 ata_altstatus(ap);
3821 drv_stat = ata_chk_status(ap);
3822
3823 /* ack bmdma irq events */
3824 ap->ops->irq_clear(ap);
3825
3826 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3827 ap->id, qc->tf.command, drv_stat, host_stat);
3828
3829 /* complete taskfile transaction */
3830 qc->err_mask |= ac_err_mask(drv_stat);
3831 break;
3832 }
3833
3834 spin_unlock_irqrestore(&host_set->lock, flags);
3835
3836 ata_eh_qc_complete(qc);
3837
3838 DPRINTK("EXIT\n");
3839 }
3840
3841 /**
3842 * ata_eng_timeout - Handle timeout of queued command
3843 * @ap: Port on which timed-out command is active
3844 *
3845 * Some part of the kernel (currently, only the SCSI layer)
3846 * has noticed that the active command on port @ap has not
3847 * completed after a specified length of time. Handle this
3848 * condition by disabling DMA (if necessary) and completing
3849 * transactions, with error if necessary.
3850 *
3851 * This also handles the case of the "lost interrupt", where
3852 * for some reason (possibly hardware bug, possibly driver bug)
3853 * an interrupt was not delivered to the driver, even though the
3854 * transaction completed successfully.
3855 *
3856 * LOCKING:
3857 * Inherited from SCSI layer (none, can sleep)
3858 */
3859
3860 void ata_eng_timeout(struct ata_port *ap)
3861 {
3862 DPRINTK("ENTER\n");
3863
3864 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3865
3866 DPRINTK("EXIT\n");
3867 }
3868
3869 /**
3870 * ata_qc_new - Request an available ATA command, for queueing
3871 * @ap: Port associated with device @dev
3872 * @dev: Device from whom we request an available command structure
3873 *
3874 * LOCKING:
3875 * None.
3876 */
3877
3878 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3879 {
3880 struct ata_queued_cmd *qc = NULL;
3881 unsigned int i;
3882
3883 for (i = 0; i < ATA_MAX_QUEUE; i++)
3884 if (!test_and_set_bit(i, &ap->qactive)) {
3885 qc = ata_qc_from_tag(ap, i);
3886 break;
3887 }
3888
3889 if (qc)
3890 qc->tag = i;
3891
3892 return qc;
3893 }
3894
3895 /**
3896 * ata_qc_new_init - Request an available ATA command, and initialize it
3897 * @ap: Port associated with device @dev
3898 * @dev: Device from whom we request an available command structure
3899 *
3900 * LOCKING:
3901 * None.
3902 */
3903
3904 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3905 struct ata_device *dev)
3906 {
3907 struct ata_queued_cmd *qc;
3908
3909 qc = ata_qc_new(ap);
3910 if (qc) {
3911 qc->scsicmd = NULL;
3912 qc->ap = ap;
3913 qc->dev = dev;
3914
3915 ata_qc_reinit(qc);
3916 }
3917
3918 return qc;
3919 }
3920
3921 /**
3922 * ata_qc_free - free unused ata_queued_cmd
3923 * @qc: Command to complete
3924 *
3925 * Designed to free unused ata_queued_cmd object
3926 * in case something prevents using it.
3927 *
3928 * LOCKING:
3929 * spin_lock_irqsave(host_set lock)
3930 */
3931 void ata_qc_free(struct ata_queued_cmd *qc)
3932 {
3933 struct ata_port *ap = qc->ap;
3934 unsigned int tag;
3935
3936 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3937
3938 qc->flags = 0;
3939 tag = qc->tag;
3940 if (likely(ata_tag_valid(tag))) {
3941 if (tag == ap->active_tag)
3942 ap->active_tag = ATA_TAG_POISON;
3943 qc->tag = ATA_TAG_POISON;
3944 clear_bit(tag, &ap->qactive);
3945 }
3946 }
3947
3948 void __ata_qc_complete(struct ata_queued_cmd *qc)
3949 {
3950 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3951 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3952
3953 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3954 ata_sg_clean(qc);
3955
3956 /* atapi: mark qc as inactive to prevent the interrupt handler
3957 * from completing the command twice later, before the error handler
3958 * is called. (when rc != 0 and atapi request sense is needed)
3959 */
3960 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3961
3962 /* call completion callback */
3963 qc->complete_fn(qc);
3964 }
3965
3966 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3967 {
3968 struct ata_port *ap = qc->ap;
3969
3970 switch (qc->tf.protocol) {
3971 case ATA_PROT_DMA:
3972 case ATA_PROT_ATAPI_DMA:
3973 return 1;
3974
3975 case ATA_PROT_ATAPI:
3976 case ATA_PROT_PIO:
3977 if (ap->flags & ATA_FLAG_PIO_DMA)
3978 return 1;
3979
3980 /* fall through */
3981
3982 default:
3983 return 0;
3984 }
3985
3986 /* never reached */
3987 }
3988
3989 /**
3990 * ata_qc_issue - issue taskfile to device
3991 * @qc: command to issue to device
3992 *
3993 * Prepare an ATA command to submission to device.
3994 * This includes mapping the data into a DMA-able
3995 * area, filling in the S/G table, and finally
3996 * writing the taskfile to hardware, starting the command.
3997 *
3998 * LOCKING:
3999 * spin_lock_irqsave(host_set lock)
4000 *
4001 * RETURNS:
4002 * Zero on success, AC_ERR_* mask on failure
4003 */
4004
4005 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4006 {
4007 struct ata_port *ap = qc->ap;
4008
4009 qc->ap->active_tag = qc->tag;
4010 qc->flags |= ATA_QCFLAG_ACTIVE;
4011
4012 if (ata_should_dma_map(qc)) {
4013 if (qc->flags & ATA_QCFLAG_SG) {
4014 if (ata_sg_setup(qc))
4015 goto sg_err;
4016 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4017 if (ata_sg_setup_one(qc))
4018 goto sg_err;
4019 }
4020 } else {
4021 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4022 }
4023
4024 ap->ops->qc_prep(qc);
4025
4026 return ap->ops->qc_issue(qc);
4027
4028 sg_err:
4029 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4030 return AC_ERR_SYSTEM;
4031 }
4032
4033
4034 /**
4035 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4036 * @qc: command to issue to device
4037 *
4038 * Using various libata functions and hooks, this function
4039 * starts an ATA command. ATA commands are grouped into
4040 * classes called "protocols", and issuing each type of protocol
4041 * is slightly different.
4042 *
4043 * May be used as the qc_issue() entry in ata_port_operations.
4044 *
4045 * LOCKING:
4046 * spin_lock_irqsave(host_set lock)
4047 *
4048 * RETURNS:
4049 * Zero on success, AC_ERR_* mask on failure
4050 */
4051
4052 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4053 {
4054 struct ata_port *ap = qc->ap;
4055
4056 ata_dev_select(ap, qc->dev->devno, 1, 0);
4057
4058 switch (qc->tf.protocol) {
4059 case ATA_PROT_NODATA:
4060 ata_tf_to_host(ap, &qc->tf);
4061 break;
4062
4063 case ATA_PROT_DMA:
4064 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4065 ap->ops->bmdma_setup(qc); /* set up bmdma */
4066 ap->ops->bmdma_start(qc); /* initiate bmdma */
4067 break;
4068
4069 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4070 ata_qc_set_polling(qc);
4071 ata_tf_to_host(ap, &qc->tf);
4072 ap->hsm_task_state = HSM_ST;
4073 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4074 break;
4075
4076 case ATA_PROT_ATAPI:
4077 ata_qc_set_polling(qc);
4078 ata_tf_to_host(ap, &qc->tf);
4079 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4080 break;
4081
4082 case ATA_PROT_ATAPI_NODATA:
4083 ap->flags |= ATA_FLAG_NOINTR;
4084 ata_tf_to_host(ap, &qc->tf);
4085 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4086 break;
4087
4088 case ATA_PROT_ATAPI_DMA:
4089 ap->flags |= ATA_FLAG_NOINTR;
4090 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4091 ap->ops->bmdma_setup(qc); /* set up bmdma */
4092 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4093 break;
4094
4095 default:
4096 WARN_ON(1);
4097 return AC_ERR_SYSTEM;
4098 }
4099
4100 return 0;
4101 }
4102
4103 /**
4104 * ata_host_intr - Handle host interrupt for given (port, task)
4105 * @ap: Port on which interrupt arrived (possibly...)
4106 * @qc: Taskfile currently active in engine
4107 *
4108 * Handle host interrupt for given queued command. Currently,
4109 * only DMA interrupts are handled. All other commands are
4110 * handled via polling with interrupts disabled (nIEN bit).
4111 *
4112 * LOCKING:
4113 * spin_lock_irqsave(host_set lock)
4114 *
4115 * RETURNS:
4116 * One if interrupt was handled, zero if not (shared irq).
4117 */
4118
4119 inline unsigned int ata_host_intr (struct ata_port *ap,
4120 struct ata_queued_cmd *qc)
4121 {
4122 u8 status, host_stat;
4123
4124 switch (qc->tf.protocol) {
4125
4126 case ATA_PROT_DMA:
4127 case ATA_PROT_ATAPI_DMA:
4128 case ATA_PROT_ATAPI:
4129 /* check status of DMA engine */
4130 host_stat = ap->ops->bmdma_status(ap);
4131 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4132
4133 /* if it's not our irq... */
4134 if (!(host_stat & ATA_DMA_INTR))
4135 goto idle_irq;
4136
4137 /* before we do anything else, clear DMA-Start bit */
4138 ap->ops->bmdma_stop(qc);
4139
4140 /* fall through */
4141
4142 case ATA_PROT_ATAPI_NODATA:
4143 case ATA_PROT_NODATA:
4144 /* check altstatus */
4145 status = ata_altstatus(ap);
4146 if (status & ATA_BUSY)
4147 goto idle_irq;
4148
4149 /* check main status, clearing INTRQ */
4150 status = ata_chk_status(ap);
4151 if (unlikely(status & ATA_BUSY))
4152 goto idle_irq;
4153 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4154 ap->id, qc->tf.protocol, status);
4155
4156 /* ack bmdma irq events */
4157 ap->ops->irq_clear(ap);
4158
4159 /* complete taskfile transaction */
4160 qc->err_mask |= ac_err_mask(status);
4161 ata_qc_complete(qc);
4162 break;
4163
4164 default:
4165 goto idle_irq;
4166 }
4167
4168 return 1; /* irq handled */
4169
4170 idle_irq:
4171 ap->stats.idle_irq++;
4172
4173 #ifdef ATA_IRQ_TRAP
4174 if ((ap->stats.idle_irq % 1000) == 0) {
4175 ata_irq_ack(ap, 0); /* debug trap */
4176 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4177 return 1;
4178 }
4179 #endif
4180 return 0; /* irq not handled */
4181 }
4182
4183 /**
4184 * ata_interrupt - Default ATA host interrupt handler
4185 * @irq: irq line (unused)
4186 * @dev_instance: pointer to our ata_host_set information structure
4187 * @regs: unused
4188 *
4189 * Default interrupt handler for PCI IDE devices. Calls
4190 * ata_host_intr() for each port that is not disabled.
4191 *
4192 * LOCKING:
4193 * Obtains host_set lock during operation.
4194 *
4195 * RETURNS:
4196 * IRQ_NONE or IRQ_HANDLED.
4197 */
4198
4199 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4200 {
4201 struct ata_host_set *host_set = dev_instance;
4202 unsigned int i;
4203 unsigned int handled = 0;
4204 unsigned long flags;
4205
4206 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4207 spin_lock_irqsave(&host_set->lock, flags);
4208
4209 for (i = 0; i < host_set->n_ports; i++) {
4210 struct ata_port *ap;
4211
4212 ap = host_set->ports[i];
4213 if (ap &&
4214 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4215 struct ata_queued_cmd *qc;
4216
4217 qc = ata_qc_from_tag(ap, ap->active_tag);
4218 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4219 (qc->flags & ATA_QCFLAG_ACTIVE))
4220 handled |= ata_host_intr(ap, qc);
4221 }
4222 }
4223
4224 spin_unlock_irqrestore(&host_set->lock, flags);
4225
4226 return IRQ_RETVAL(handled);
4227 }
4228
4229
4230 /*
4231 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4232 * without filling any other registers
4233 */
4234 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4235 u8 cmd)
4236 {
4237 struct ata_taskfile tf;
4238 int err;
4239
4240 ata_tf_init(ap, &tf, dev->devno);
4241
4242 tf.command = cmd;
4243 tf.flags |= ATA_TFLAG_DEVICE;
4244 tf.protocol = ATA_PROT_NODATA;
4245
4246 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4247 if (err)
4248 printk(KERN_ERR "%s: ata command failed: %d\n",
4249 __FUNCTION__, err);
4250
4251 return err;
4252 }
4253
4254 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4255 {
4256 u8 cmd;
4257
4258 if (!ata_try_flush_cache(dev))
4259 return 0;
4260
4261 if (ata_id_has_flush_ext(dev->id))
4262 cmd = ATA_CMD_FLUSH_EXT;
4263 else
4264 cmd = ATA_CMD_FLUSH;
4265
4266 return ata_do_simple_cmd(ap, dev, cmd);
4267 }
4268
4269 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4270 {
4271 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4272 }
4273
4274 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4275 {
4276 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4277 }
4278
4279 /**
4280 * ata_device_resume - wakeup a previously suspended devices
4281 * @ap: port the device is connected to
4282 * @dev: the device to resume
4283 *
4284 * Kick the drive back into action, by sending it an idle immediate
4285 * command and making sure its transfer mode matches between drive
4286 * and host.
4287 *
4288 */
4289 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4290 {
4291 if (ap->flags & ATA_FLAG_SUSPENDED) {
4292 ap->flags &= ~ATA_FLAG_SUSPENDED;
4293 ata_set_mode(ap);
4294 }
4295 if (!ata_dev_present(dev))
4296 return 0;
4297 if (dev->class == ATA_DEV_ATA)
4298 ata_start_drive(ap, dev);
4299
4300 return 0;
4301 }
4302
4303 /**
4304 * ata_device_suspend - prepare a device for suspend
4305 * @ap: port the device is connected to
4306 * @dev: the device to suspend
4307 *
4308 * Flush the cache on the drive, if appropriate, then issue a
4309 * standbynow command.
4310 */
4311 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4312 {
4313 if (!ata_dev_present(dev))
4314 return 0;
4315 if (dev->class == ATA_DEV_ATA)
4316 ata_flush_cache(ap, dev);
4317
4318 if (state.event != PM_EVENT_FREEZE)
4319 ata_standby_drive(ap, dev);
4320 ap->flags |= ATA_FLAG_SUSPENDED;
4321 return 0;
4322 }
4323
4324 /**
4325 * ata_port_start - Set port up for dma.
4326 * @ap: Port to initialize
4327 *
4328 * Called just after data structures for each port are
4329 * initialized. Allocates space for PRD table.
4330 *
4331 * May be used as the port_start() entry in ata_port_operations.
4332 *
4333 * LOCKING:
4334 * Inherited from caller.
4335 */
4336
4337 int ata_port_start (struct ata_port *ap)
4338 {
4339 struct device *dev = ap->dev;
4340 int rc;
4341
4342 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4343 if (!ap->prd)
4344 return -ENOMEM;
4345
4346 rc = ata_pad_alloc(ap, dev);
4347 if (rc) {
4348 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4349 return rc;
4350 }
4351
4352 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4353
4354 return 0;
4355 }
4356
4357
4358 /**
4359 * ata_port_stop - Undo ata_port_start()
4360 * @ap: Port to shut down
4361 *
4362 * Frees the PRD table.
4363 *
4364 * May be used as the port_stop() entry in ata_port_operations.
4365 *
4366 * LOCKING:
4367 * Inherited from caller.
4368 */
4369
4370 void ata_port_stop (struct ata_port *ap)
4371 {
4372 struct device *dev = ap->dev;
4373
4374 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4375 ata_pad_free(ap, dev);
4376 }
4377
4378 void ata_host_stop (struct ata_host_set *host_set)
4379 {
4380 if (host_set->mmio_base)
4381 iounmap(host_set->mmio_base);
4382 }
4383
4384
4385 /**
4386 * ata_host_remove - Unregister SCSI host structure with upper layers
4387 * @ap: Port to unregister
4388 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4389 *
4390 * LOCKING:
4391 * Inherited from caller.
4392 */
4393
4394 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4395 {
4396 struct Scsi_Host *sh = ap->host;
4397
4398 DPRINTK("ENTER\n");
4399
4400 if (do_unregister)
4401 scsi_remove_host(sh);
4402
4403 ap->ops->port_stop(ap);
4404 }
4405
4406 /**
4407 * ata_host_init - Initialize an ata_port structure
4408 * @ap: Structure to initialize
4409 * @host: associated SCSI mid-layer structure
4410 * @host_set: Collection of hosts to which @ap belongs
4411 * @ent: Probe information provided by low-level driver
4412 * @port_no: Port number associated with this ata_port
4413 *
4414 * Initialize a new ata_port structure, and its associated
4415 * scsi_host.
4416 *
4417 * LOCKING:
4418 * Inherited from caller.
4419 */
4420
4421 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4422 struct ata_host_set *host_set,
4423 const struct ata_probe_ent *ent, unsigned int port_no)
4424 {
4425 unsigned int i;
4426
4427 host->max_id = 16;
4428 host->max_lun = 1;
4429 host->max_channel = 1;
4430 host->unique_id = ata_unique_id++;
4431 host->max_cmd_len = 12;
4432
4433 ap->flags = ATA_FLAG_PORT_DISABLED;
4434 ap->id = host->unique_id;
4435 ap->host = host;
4436 ap->ctl = ATA_DEVCTL_OBS;
4437 ap->host_set = host_set;
4438 ap->dev = ent->dev;
4439 ap->port_no = port_no;
4440 ap->hard_port_no =
4441 ent->legacy_mode ? ent->hard_port_no : port_no;
4442 ap->pio_mask = ent->pio_mask;
4443 ap->mwdma_mask = ent->mwdma_mask;
4444 ap->udma_mask = ent->udma_mask;
4445 ap->flags |= ent->host_flags;
4446 ap->ops = ent->port_ops;
4447 ap->cbl = ATA_CBL_NONE;
4448 ap->active_tag = ATA_TAG_POISON;
4449 ap->last_ctl = 0xFF;
4450
4451 INIT_WORK(&ap->port_task, NULL, NULL);
4452 INIT_LIST_HEAD(&ap->eh_done_q);
4453
4454 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4455 struct ata_device *dev = &ap->device[i];
4456 dev->devno = i;
4457 dev->pio_mask = UINT_MAX;
4458 dev->mwdma_mask = UINT_MAX;
4459 dev->udma_mask = UINT_MAX;
4460 }
4461
4462 #ifdef ATA_IRQ_TRAP
4463 ap->stats.unhandled_irq = 1;
4464 ap->stats.idle_irq = 1;
4465 #endif
4466
4467 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4468 }
4469
4470 /**
4471 * ata_host_add - Attach low-level ATA driver to system
4472 * @ent: Information provided by low-level driver
4473 * @host_set: Collections of ports to which we add
4474 * @port_no: Port number associated with this host
4475 *
4476 * Attach low-level ATA driver to system.
4477 *
4478 * LOCKING:
4479 * PCI/etc. bus probe sem.
4480 *
4481 * RETURNS:
4482 * New ata_port on success, for NULL on error.
4483 */
4484
4485 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4486 struct ata_host_set *host_set,
4487 unsigned int port_no)
4488 {
4489 struct Scsi_Host *host;
4490 struct ata_port *ap;
4491 int rc;
4492
4493 DPRINTK("ENTER\n");
4494
4495 if (!ent->port_ops->probe_reset &&
4496 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4497 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4498 port_no);
4499 return NULL;
4500 }
4501
4502 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4503 if (!host)
4504 return NULL;
4505
4506 host->transportt = &ata_scsi_transport_template;
4507
4508 ap = (struct ata_port *) &host->hostdata[0];
4509
4510 ata_host_init(ap, host, host_set, ent, port_no);
4511
4512 rc = ap->ops->port_start(ap);
4513 if (rc)
4514 goto err_out;
4515
4516 return ap;
4517
4518 err_out:
4519 scsi_host_put(host);
4520 return NULL;
4521 }
4522
4523 /**
4524 * ata_device_add - Register hardware device with ATA and SCSI layers
4525 * @ent: Probe information describing hardware device to be registered
4526 *
4527 * This function processes the information provided in the probe
4528 * information struct @ent, allocates the necessary ATA and SCSI
4529 * host information structures, initializes them, and registers
4530 * everything with requisite kernel subsystems.
4531 *
4532 * This function requests irqs, probes the ATA bus, and probes
4533 * the SCSI bus.
4534 *
4535 * LOCKING:
4536 * PCI/etc. bus probe sem.
4537 *
4538 * RETURNS:
4539 * Number of ports registered. Zero on error (no ports registered).
4540 */
4541
4542 int ata_device_add(const struct ata_probe_ent *ent)
4543 {
4544 unsigned int count = 0, i;
4545 struct device *dev = ent->dev;
4546 struct ata_host_set *host_set;
4547
4548 DPRINTK("ENTER\n");
4549 /* alloc a container for our list of ATA ports (buses) */
4550 host_set = kzalloc(sizeof(struct ata_host_set) +
4551 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4552 if (!host_set)
4553 return 0;
4554 spin_lock_init(&host_set->lock);
4555
4556 host_set->dev = dev;
4557 host_set->n_ports = ent->n_ports;
4558 host_set->irq = ent->irq;
4559 host_set->mmio_base = ent->mmio_base;
4560 host_set->private_data = ent->private_data;
4561 host_set->ops = ent->port_ops;
4562 host_set->flags = ent->host_set_flags;
4563
4564 /* register each port bound to this device */
4565 for (i = 0; i < ent->n_ports; i++) {
4566 struct ata_port *ap;
4567 unsigned long xfer_mode_mask;
4568
4569 ap = ata_host_add(ent, host_set, i);
4570 if (!ap)
4571 goto err_out;
4572
4573 host_set->ports[i] = ap;
4574 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
4575 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4576 (ap->pio_mask << ATA_SHIFT_PIO);
4577
4578 /* print per-port info to dmesg */
4579 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4580 "bmdma 0x%lX irq %lu\n",
4581 ap->id,
4582 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4583 ata_mode_string(xfer_mode_mask),
4584 ap->ioaddr.cmd_addr,
4585 ap->ioaddr.ctl_addr,
4586 ap->ioaddr.bmdma_addr,
4587 ent->irq);
4588
4589 ata_chk_status(ap);
4590 host_set->ops->irq_clear(ap);
4591 count++;
4592 }
4593
4594 if (!count)
4595 goto err_free_ret;
4596
4597 /* obtain irq, that is shared between channels */
4598 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4599 DRV_NAME, host_set))
4600 goto err_out;
4601
4602 /* perform each probe synchronously */
4603 DPRINTK("probe begin\n");
4604 for (i = 0; i < count; i++) {
4605 struct ata_port *ap;
4606 int rc;
4607
4608 ap = host_set->ports[i];
4609
4610 DPRINTK("ata%u: bus probe begin\n", ap->id);
4611 rc = ata_bus_probe(ap);
4612 DPRINTK("ata%u: bus probe end\n", ap->id);
4613
4614 if (rc) {
4615 /* FIXME: do something useful here?
4616 * Current libata behavior will
4617 * tear down everything when
4618 * the module is removed
4619 * or the h/w is unplugged.
4620 */
4621 }
4622
4623 rc = scsi_add_host(ap->host, dev);
4624 if (rc) {
4625 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4626 ap->id);
4627 /* FIXME: do something useful here */
4628 /* FIXME: handle unconditional calls to
4629 * scsi_scan_host and ata_host_remove, below,
4630 * at the very least
4631 */
4632 }
4633 }
4634
4635 /* probes are done, now scan each port's disk(s) */
4636 DPRINTK("host probe begin\n");
4637 for (i = 0; i < count; i++) {
4638 struct ata_port *ap = host_set->ports[i];
4639
4640 ata_scsi_scan_host(ap);
4641 }
4642
4643 dev_set_drvdata(dev, host_set);
4644
4645 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4646 return ent->n_ports; /* success */
4647
4648 err_out:
4649 for (i = 0; i < count; i++) {
4650 ata_host_remove(host_set->ports[i], 1);
4651 scsi_host_put(host_set->ports[i]->host);
4652 }
4653 err_free_ret:
4654 kfree(host_set);
4655 VPRINTK("EXIT, returning 0\n");
4656 return 0;
4657 }
4658
4659 /**
4660 * ata_host_set_remove - PCI layer callback for device removal
4661 * @host_set: ATA host set that was removed
4662 *
4663 * Unregister all objects associated with this host set. Free those
4664 * objects.
4665 *
4666 * LOCKING:
4667 * Inherited from calling layer (may sleep).
4668 */
4669
4670 void ata_host_set_remove(struct ata_host_set *host_set)
4671 {
4672 struct ata_port *ap;
4673 unsigned int i;
4674
4675 for (i = 0; i < host_set->n_ports; i++) {
4676 ap = host_set->ports[i];
4677 scsi_remove_host(ap->host);
4678 }
4679
4680 free_irq(host_set->irq, host_set);
4681
4682 for (i = 0; i < host_set->n_ports; i++) {
4683 ap = host_set->ports[i];
4684
4685 ata_scsi_release(ap->host);
4686
4687 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4688 struct ata_ioports *ioaddr = &ap->ioaddr;
4689
4690 if (ioaddr->cmd_addr == 0x1f0)
4691 release_region(0x1f0, 8);
4692 else if (ioaddr->cmd_addr == 0x170)
4693 release_region(0x170, 8);
4694 }
4695
4696 scsi_host_put(ap->host);
4697 }
4698
4699 if (host_set->ops->host_stop)
4700 host_set->ops->host_stop(host_set);
4701
4702 kfree(host_set);
4703 }
4704
4705 /**
4706 * ata_scsi_release - SCSI layer callback hook for host unload
4707 * @host: libata host to be unloaded
4708 *
4709 * Performs all duties necessary to shut down a libata port...
4710 * Kill port kthread, disable port, and release resources.
4711 *
4712 * LOCKING:
4713 * Inherited from SCSI layer.
4714 *
4715 * RETURNS:
4716 * One.
4717 */
4718
4719 int ata_scsi_release(struct Scsi_Host *host)
4720 {
4721 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4722 int i;
4723
4724 DPRINTK("ENTER\n");
4725
4726 ap->ops->port_disable(ap);
4727 ata_host_remove(ap, 0);
4728 for (i = 0; i < ATA_MAX_DEVICES; i++)
4729 kfree(ap->device[i].id);
4730
4731 DPRINTK("EXIT\n");
4732 return 1;
4733 }
4734
4735 /**
4736 * ata_std_ports - initialize ioaddr with standard port offsets.
4737 * @ioaddr: IO address structure to be initialized
4738 *
4739 * Utility function which initializes data_addr, error_addr,
4740 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4741 * device_addr, status_addr, and command_addr to standard offsets
4742 * relative to cmd_addr.
4743 *
4744 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4745 */
4746
4747 void ata_std_ports(struct ata_ioports *ioaddr)
4748 {
4749 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4750 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4751 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4752 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4753 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4754 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4755 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4756 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4757 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4758 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4759 }
4760
4761
4762 #ifdef CONFIG_PCI
4763
4764 void ata_pci_host_stop (struct ata_host_set *host_set)
4765 {
4766 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4767
4768 pci_iounmap(pdev, host_set->mmio_base);
4769 }
4770
4771 /**
4772 * ata_pci_remove_one - PCI layer callback for device removal
4773 * @pdev: PCI device that was removed
4774 *
4775 * PCI layer indicates to libata via this hook that
4776 * hot-unplug or module unload event has occurred.
4777 * Handle this by unregistering all objects associated
4778 * with this PCI device. Free those objects. Then finally
4779 * release PCI resources and disable device.
4780 *
4781 * LOCKING:
4782 * Inherited from PCI layer (may sleep).
4783 */
4784
4785 void ata_pci_remove_one (struct pci_dev *pdev)
4786 {
4787 struct device *dev = pci_dev_to_dev(pdev);
4788 struct ata_host_set *host_set = dev_get_drvdata(dev);
4789
4790 ata_host_set_remove(host_set);
4791 pci_release_regions(pdev);
4792 pci_disable_device(pdev);
4793 dev_set_drvdata(dev, NULL);
4794 }
4795
4796 /* move to PCI subsystem */
4797 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4798 {
4799 unsigned long tmp = 0;
4800
4801 switch (bits->width) {
4802 case 1: {
4803 u8 tmp8 = 0;
4804 pci_read_config_byte(pdev, bits->reg, &tmp8);
4805 tmp = tmp8;
4806 break;
4807 }
4808 case 2: {
4809 u16 tmp16 = 0;
4810 pci_read_config_word(pdev, bits->reg, &tmp16);
4811 tmp = tmp16;
4812 break;
4813 }
4814 case 4: {
4815 u32 tmp32 = 0;
4816 pci_read_config_dword(pdev, bits->reg, &tmp32);
4817 tmp = tmp32;
4818 break;
4819 }
4820
4821 default:
4822 return -EINVAL;
4823 }
4824
4825 tmp &= bits->mask;
4826
4827 return (tmp == bits->val) ? 1 : 0;
4828 }
4829
4830 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4831 {
4832 pci_save_state(pdev);
4833 pci_disable_device(pdev);
4834 pci_set_power_state(pdev, PCI_D3hot);
4835 return 0;
4836 }
4837
4838 int ata_pci_device_resume(struct pci_dev *pdev)
4839 {
4840 pci_set_power_state(pdev, PCI_D0);
4841 pci_restore_state(pdev);
4842 pci_enable_device(pdev);
4843 pci_set_master(pdev);
4844 return 0;
4845 }
4846 #endif /* CONFIG_PCI */
4847
4848
4849 static int __init ata_init(void)
4850 {
4851 ata_wq = create_workqueue("ata");
4852 if (!ata_wq)
4853 return -ENOMEM;
4854
4855 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4856 return 0;
4857 }
4858
4859 static void __exit ata_exit(void)
4860 {
4861 destroy_workqueue(ata_wq);
4862 }
4863
4864 module_init(ata_init);
4865 module_exit(ata_exit);
4866
4867 static unsigned long ratelimit_time;
4868 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4869
4870 int ata_ratelimit(void)
4871 {
4872 int rc;
4873 unsigned long flags;
4874
4875 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4876
4877 if (time_after(jiffies, ratelimit_time)) {
4878 rc = 1;
4879 ratelimit_time = jiffies + (HZ/5);
4880 } else
4881 rc = 0;
4882
4883 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4884
4885 return rc;
4886 }
4887
4888 /*
4889 * libata is essentially a library of internal helper functions for
4890 * low-level ATA host controller drivers. As such, the API/ABI is
4891 * likely to change as new drivers are added and updated.
4892 * Do not depend on ABI/API stability.
4893 */
4894
4895 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4896 EXPORT_SYMBOL_GPL(ata_std_ports);
4897 EXPORT_SYMBOL_GPL(ata_device_add);
4898 EXPORT_SYMBOL_GPL(ata_host_set_remove);
4899 EXPORT_SYMBOL_GPL(ata_sg_init);
4900 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4901 EXPORT_SYMBOL_GPL(__ata_qc_complete);
4902 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4903 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4904 EXPORT_SYMBOL_GPL(ata_tf_load);
4905 EXPORT_SYMBOL_GPL(ata_tf_read);
4906 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4907 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4908 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4909 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4910 EXPORT_SYMBOL_GPL(ata_check_status);
4911 EXPORT_SYMBOL_GPL(ata_altstatus);
4912 EXPORT_SYMBOL_GPL(ata_exec_command);
4913 EXPORT_SYMBOL_GPL(ata_port_start);
4914 EXPORT_SYMBOL_GPL(ata_port_stop);
4915 EXPORT_SYMBOL_GPL(ata_host_stop);
4916 EXPORT_SYMBOL_GPL(ata_interrupt);
4917 EXPORT_SYMBOL_GPL(ata_qc_prep);
4918 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4919 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4920 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4921 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4922 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4923 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4924 EXPORT_SYMBOL_GPL(ata_port_probe);
4925 EXPORT_SYMBOL_GPL(sata_phy_reset);
4926 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4927 EXPORT_SYMBOL_GPL(ata_bus_reset);
4928 EXPORT_SYMBOL_GPL(ata_std_probeinit);
4929 EXPORT_SYMBOL_GPL(ata_std_softreset);
4930 EXPORT_SYMBOL_GPL(sata_std_hardreset);
4931 EXPORT_SYMBOL_GPL(ata_std_postreset);
4932 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4933 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4934 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
4935 EXPORT_SYMBOL_GPL(ata_dev_classify);
4936 EXPORT_SYMBOL_GPL(ata_dev_pair);
4937 EXPORT_SYMBOL_GPL(ata_port_disable);
4938 EXPORT_SYMBOL_GPL(ata_ratelimit);
4939 EXPORT_SYMBOL_GPL(ata_busy_sleep);
4940 EXPORT_SYMBOL_GPL(ata_port_queue_task);
4941 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4942 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4943 EXPORT_SYMBOL_GPL(ata_scsi_error);
4944 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4945 EXPORT_SYMBOL_GPL(ata_scsi_release);
4946 EXPORT_SYMBOL_GPL(ata_host_intr);
4947 EXPORT_SYMBOL_GPL(ata_id_string);
4948 EXPORT_SYMBOL_GPL(ata_id_c_string);
4949 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4950 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4951 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4952
4953 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4954 EXPORT_SYMBOL_GPL(ata_timing_compute);
4955 EXPORT_SYMBOL_GPL(ata_timing_merge);
4956
4957 #ifdef CONFIG_PCI
4958 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4959 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
4960 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4961 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4962 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4963 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
4964 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
4965 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
4966 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
4967 #endif /* CONFIG_PCI */
4968
4969 EXPORT_SYMBOL_GPL(ata_device_suspend);
4970 EXPORT_SYMBOL_GPL(ata_device_resume);
4971 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4972 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);