1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev,
66 u16 heads,
67 u16 sectors);
68 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
69 struct ata_device *dev);
70 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
71
72 static unsigned int ata_unique_id = 1;
73 static struct workqueue_struct *ata_wq;
74
75 int atapi_enabled = 1;
76 module_param(atapi_enabled, int, 0444);
77 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
78
79 int libata_fua = 0;
80 module_param_named(fua, libata_fua, int, 0444);
81 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
82
83 MODULE_AUTHOR("Jeff Garzik");
84 MODULE_DESCRIPTION("Library module for ATA devices");
85 MODULE_LICENSE("GPL");
86 MODULE_VERSION(DRV_VERSION);
87
88
89 /**
90 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
91 * @tf: Taskfile to convert
92 * @fis: Buffer into which data will be output
93 * @pmp: Port multiplier port
94 *
95 * Converts a standard ATA taskfile to a Serial ATA
96 * FIS structure (Register - Host to Device).
97 *
98 * LOCKING:
99 * Inherited from caller.
100 */
101
102 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
103 {
104 fis[0] = 0x27; /* Register - Host to Device FIS */
105 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
106 bit 7 indicates Command FIS */
107 fis[2] = tf->command;
108 fis[3] = tf->feature;
109
110 fis[4] = tf->lbal;
111 fis[5] = tf->lbam;
112 fis[6] = tf->lbah;
113 fis[7] = tf->device;
114
115 fis[8] = tf->hob_lbal;
116 fis[9] = tf->hob_lbam;
117 fis[10] = tf->hob_lbah;
118 fis[11] = tf->hob_feature;
119
120 fis[12] = tf->nsect;
121 fis[13] = tf->hob_nsect;
122 fis[14] = 0;
123 fis[15] = tf->ctl;
124
125 fis[16] = 0;
126 fis[17] = 0;
127 fis[18] = 0;
128 fis[19] = 0;
129 }
130
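/*
 * Example (illustrative sketch): a SATA LLDD typically uses
 * ata_tf_to_fis() to fill the 20-byte Register - Host to Device FIS
 * in its controller-specific command table.  `cmd_fis' here is a
 * hypothetical driver-owned buffer of at least 20 bytes:
 *
 *	u8 cmd_fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, cmd_fis, 0);	// pmp 0: no port multiplier
 */
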
131 /**
132 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
133 * @fis: Buffer from which data will be input
134 * @tf: Taskfile to output
135 *
136 * Converts a serial ATA FIS structure to a standard ATA taskfile.
137 *
138 * LOCKING:
139 * Inherited from caller.
140 */
141
142 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
143 {
144 tf->command = fis[2]; /* status */
145 tf->feature = fis[3]; /* error */
146
147 tf->lbal = fis[4];
148 tf->lbam = fis[5];
149 tf->lbah = fis[6];
150 tf->device = fis[7];
151
152 tf->hob_lbal = fis[8];
153 tf->hob_lbam = fis[9];
154 tf->hob_lbah = fis[10];
155
156 tf->nsect = fis[12];
157 tf->hob_nsect = fis[13];
158 }
159
160 static const u8 ata_rw_cmds[] = {
161 /* pio multi */
162 ATA_CMD_READ_MULTI,
163 ATA_CMD_WRITE_MULTI,
164 ATA_CMD_READ_MULTI_EXT,
165 ATA_CMD_WRITE_MULTI_EXT,
166 0,
167 0,
168 0,
169 ATA_CMD_WRITE_MULTI_FUA_EXT,
170 /* pio */
171 ATA_CMD_PIO_READ,
172 ATA_CMD_PIO_WRITE,
173 ATA_CMD_PIO_READ_EXT,
174 ATA_CMD_PIO_WRITE_EXT,
175 0,
176 0,
177 0,
178 0,
179 /* dma */
180 ATA_CMD_READ,
181 ATA_CMD_WRITE,
182 ATA_CMD_READ_EXT,
183 ATA_CMD_WRITE_EXT,
184 0,
185 0,
186 0,
187 ATA_CMD_WRITE_FUA_EXT
188 };
189
190 /**
191 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
192 * @qc: command to examine and configure
193 *
194 * Examine the device configuration and tf->flags to calculate
195 * the proper read/write commands and protocol to use.
196 *
197 * LOCKING:
198 * caller.
199 */
200 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
201 {
202 struct ata_taskfile *tf = &qc->tf;
203 struct ata_device *dev = qc->dev;
204 u8 cmd;
205
206 int index, fua, lba48, write;
207
208 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
209 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
210 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
211
212 if (dev->flags & ATA_DFLAG_PIO) {
213 tf->protocol = ATA_PROT_PIO;
214 index = dev->multi_count ? 0 : 8;
215 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
216 /* Unable to use DMA due to host limitation */
217 tf->protocol = ATA_PROT_PIO;
218 index = dev->multi_count ? 0 : 8;
219 } else {
220 tf->protocol = ATA_PROT_DMA;
221 index = 16;
222 }
223
224 cmd = ata_rw_cmds[index + fua + lba48 + write];
225 if (cmd) {
226 tf->command = cmd;
227 return 0;
228 }
229 return -1;
230 }
231
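/*
 * Worked example: the lookup above is ata_rw_cmds[index + fua + lba48
 * + write], where index selects the block of eight commands (0 = PIO
 * multi, 8 = PIO single, 16 = DMA).  For a DMA write with LBA48 and
 * FUA: 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT.  For a PIO
 * read on a device without multi-sector support: 8 + 0 + 0 + 0 = 8,
 * i.e. ATA_CMD_PIO_READ.  Slots containing 0 (e.g. a FUA read) make
 * ata_rwcmd_protocol() return -1.
 */
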
232 /**
233 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
234 * @pio_mask: pio_mask
235 * @mwdma_mask: mwdma_mask
236 * @udma_mask: udma_mask
237 *
238 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
239 * unsigned int xfer_mask.
240 *
241 * LOCKING:
242 * None.
243 *
244 * RETURNS:
245 * Packed xfer_mask.
246 */
247 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
248 unsigned int mwdma_mask,
249 unsigned int udma_mask)
250 {
251 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
252 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
253 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
254 }
255
256 /**
257 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
258 * @xfer_mask: xfer_mask to unpack
259 * @pio_mask: resulting pio_mask
260 * @mwdma_mask: resulting mwdma_mask
261 * @udma_mask: resulting udma_mask
262 *
263 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
264 * Any NULL destination masks will be ignored.
265 */
266 static void ata_unpack_xfermask(unsigned int xfer_mask,
267 unsigned int *pio_mask,
268 unsigned int *mwdma_mask,
269 unsigned int *udma_mask)
270 {
271 if (pio_mask)
272 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
273 if (mwdma_mask)
274 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
275 if (udma_mask)
276 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
277 }
278
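/*
 * Example (illustrative sketch): packing and unpacking are symmetric,
 * so a round trip recovers the individual masks.  The values below
 * (PIO0-4, MWDMA0-2, UDMA0-5) are only an example:
 *
 *	unsigned int xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	// pio == 0x1f, mwdma == 0x07, udma == 0x3f again
 */
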
279 static const struct ata_xfer_ent {
280 int shift, bits;
281 u8 base;
282 } ata_xfer_tbl[] = {
283 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
284 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
285 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
286 { -1, },
287 };
288
289 /**
290 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
291 * @xfer_mask: xfer_mask of interest
292 *
293 * Return matching XFER_* value for @xfer_mask. Only the highest
294 * bit of @xfer_mask is considered.
295 *
296 * LOCKING:
297 * None.
298 *
299 * RETURNS:
300 * Matching XFER_* value, 0 if no match found.
301 */
302 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
303 {
304 int highbit = fls(xfer_mask) - 1;
305 const struct ata_xfer_ent *ent;
306
307 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
308 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
309 return ent->base + highbit - ent->shift;
310 return 0;
311 }
312
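/*
 * Worked example: only the highest set bit matters, so for a packed
 * mask covering PIO0-4, MWDMA0-2 and UDMA0-5 the highest bit falls in
 * the UDMA range and ata_xfer_mask2mode() returns XFER_UDMA_5.  The
 * reverse lookup, ata_xfer_mode2mask(XFER_UDMA_5), returns only that
 * single UDMA5 bit.
 */
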
313 /**
314 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
315 * @xfer_mode: XFER_* of interest
316 *
317 * Return matching xfer_mask for @xfer_mode.
318 *
319 * LOCKING:
320 * None.
321 *
322 * RETURNS:
323 * Matching xfer_mask, 0 if no match found.
324 */
325 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
326 {
327 const struct ata_xfer_ent *ent;
328
329 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
330 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
331 return 1 << (ent->shift + xfer_mode - ent->base);
332 return 0;
333 }
334
335 /**
336 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
337 * @xfer_mode: XFER_* of interest
338 *
339 * Return matching xfer_shift for @xfer_mode.
340 *
341 * LOCKING:
342 * None.
343 *
344 * RETURNS:
345 * Matching xfer_shift, -1 if no match found.
346 */
347 static int ata_xfer_mode2shift(unsigned int xfer_mode)
348 {
349 const struct ata_xfer_ent *ent;
350
351 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
352 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
353 return ent->shift;
354 return -1;
355 }
356
357 /**
358 * ata_mode_string - convert xfer_mask to string
359 * @xfer_mask: mask of bits supported; only highest bit counts.
360 *
361 * Determine the string which represents the highest speed
362 * (the highest bit in @xfer_mask).
363 *
364 * LOCKING:
365 * None.
366 *
367 * RETURNS:
368 * Constant C string representing the highest speed listed in
369 * @xfer_mask, or the constant C string "<n/a>".
370 */
371 static const char *ata_mode_string(unsigned int xfer_mask)
372 {
373 static const char * const xfer_mode_str[] = {
374 "PIO0",
375 "PIO1",
376 "PIO2",
377 "PIO3",
378 "PIO4",
379 "MWDMA0",
380 "MWDMA1",
381 "MWDMA2",
382 "UDMA/16",
383 "UDMA/25",
384 "UDMA/33",
385 "UDMA/44",
386 "UDMA/66",
387 "UDMA/100",
388 "UDMA/133",
389 "UDMA7",
390 };
391 int highbit;
392
393 highbit = fls(xfer_mask) - 1;
394 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
395 return xfer_mode_str[highbit];
396 return "<n/a>";
397 }
398
399 static const char *sata_spd_string(unsigned int spd)
400 {
401 static const char * const spd_str[] = {
402 "1.5 Gbps",
403 "3.0 Gbps",
404 };
405
406 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
407 return "<unknown>";
408 return spd_str[spd - 1];
409 }
410
411 void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
412 {
413 if (ata_dev_enabled(dev)) {
414 printk(KERN_WARNING "ata%u: dev %u disabled\n",
415 ap->id, dev->devno);
416 dev->class++;
417 }
418 }
419
420 /**
421 * ata_pio_devchk - PATA device presence detection
422 * @ap: ATA channel to examine
423 * @device: Device to examine (starting at zero)
424 *
425 * This technique was originally described in
426 * Hale Landis's ATADRVR (www.ata-atapi.com), and
427 * later found its way into the ATA/ATAPI spec.
428 *
429 * Write a pattern to the ATA shadow registers,
430 * and if a device is present, it will respond by
431 * correctly storing and echoing back the
432 * ATA shadow register contents.
433 *
434 * LOCKING:
435 * caller.
436 */
437
438 static unsigned int ata_pio_devchk(struct ata_port *ap,
439 unsigned int device)
440 {
441 struct ata_ioports *ioaddr = &ap->ioaddr;
442 u8 nsect, lbal;
443
444 ap->ops->dev_select(ap, device);
445
446 outb(0x55, ioaddr->nsect_addr);
447 outb(0xaa, ioaddr->lbal_addr);
448
449 outb(0xaa, ioaddr->nsect_addr);
450 outb(0x55, ioaddr->lbal_addr);
451
452 outb(0x55, ioaddr->nsect_addr);
453 outb(0xaa, ioaddr->lbal_addr);
454
455 nsect = inb(ioaddr->nsect_addr);
456 lbal = inb(ioaddr->lbal_addr);
457
458 if ((nsect == 0x55) && (lbal == 0xaa))
459 return 1; /* we found a device */
460
461 return 0; /* nothing found */
462 }
463
464 /**
465 * ata_mmio_devchk - PATA device presence detection
466 * @ap: ATA channel to examine
467 * @device: Device to examine (starting at zero)
468 *
469 * This technique was originally described in
470 * Hale Landis's ATADRVR (www.ata-atapi.com), and
471 * later found its way into the ATA/ATAPI spec.
472 *
473 * Write a pattern to the ATA shadow registers,
474 * and if a device is present, it will respond by
475 * correctly storing and echoing back the
476 * ATA shadow register contents.
477 *
478 * LOCKING:
479 * caller.
480 */
481
482 static unsigned int ata_mmio_devchk(struct ata_port *ap,
483 unsigned int device)
484 {
485 struct ata_ioports *ioaddr = &ap->ioaddr;
486 u8 nsect, lbal;
487
488 ap->ops->dev_select(ap, device);
489
490 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
491 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
492
493 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
494 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
495
496 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
497 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
498
499 nsect = readb((void __iomem *) ioaddr->nsect_addr);
500 lbal = readb((void __iomem *) ioaddr->lbal_addr);
501
502 if ((nsect == 0x55) && (lbal == 0xaa))
503 return 1; /* we found a device */
504
505 return 0; /* nothing found */
506 }
507
508 /**
509 * ata_devchk - PATA device presence detection
510 * @ap: ATA channel to examine
511 * @device: Device to examine (starting at zero)
512 *
513 * Dispatch ATA device presence detection, depending
514 * on whether we are using PIO or MMIO to talk to the
515 * ATA shadow registers.
516 *
517 * LOCKING:
518 * caller.
519 */
520
521 static unsigned int ata_devchk(struct ata_port *ap,
522 unsigned int device)
523 {
524 if (ap->flags & ATA_FLAG_MMIO)
525 return ata_mmio_devchk(ap, device);
526 return ata_pio_devchk(ap, device);
527 }
528
529 /**
530 * ata_dev_classify - determine device type based on ATA-spec signature
531 * @tf: ATA taskfile register set for device to be identified
532 *
533 * Determine from taskfile register contents whether a device is
534 * ATA or ATAPI, as per "Signature and persistence" section
535 * of ATA/PI spec (volume 1, sect 5.14).
536 *
537 * LOCKING:
538 * None.
539 *
540 * RETURNS:
541 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN in
542 * the event of failure.
543 */
544
545 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
546 {
547 /* Apple's open source Darwin code hints that some devices only
548 * put a proper signature into the LBA mid/high registers,
549 * so we only check those. It's sufficient for uniqueness.
550 */
551
552 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
553 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
554 DPRINTK("found ATA device by sig\n");
555 return ATA_DEV_ATA;
556 }
557
558 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
559 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
560 DPRINTK("found ATAPI device by sig\n");
561 return ATA_DEV_ATAPI;
562 }
563
564 DPRINTK("unknown device\n");
565 return ATA_DEV_UNKNOWN;
566 }
567
568 /**
569 * ata_dev_try_classify - Parse returned ATA device signature
570 * @ap: ATA channel to examine
571 * @device: Device to examine (starting at zero)
572 * @r_err: Value of error register on completion
573 *
574 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
575 * an ATA/ATAPI-defined set of values is placed in the ATA
576 * shadow registers, indicating the results of device detection
577 * and diagnostics.
578 *
579 * Select the ATA device, and read the values from the ATA shadow
580 * registers. Then parse according to the Error register value,
581 * and the spec-defined values examined by ata_dev_classify().
582 *
583 * LOCKING:
584 * caller.
585 *
586 * RETURNS:
587 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
588 */
589
590 static unsigned int
591 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
592 {
593 struct ata_taskfile tf;
594 unsigned int class;
595 u8 err;
596
597 ap->ops->dev_select(ap, device);
598
599 memset(&tf, 0, sizeof(tf));
600
601 ap->ops->tf_read(ap, &tf);
602 err = tf.feature;
603 if (r_err)
604 *r_err = err;
605
606 /* see if device passed diags */
607 if (err == 1)
608 /* do nothing */ ;
609 else if ((device == 0) && (err == 0x81))
610 /* do nothing */ ;
611 else
612 return ATA_DEV_NONE;
613
614 /* determine if device is ATA or ATAPI */
615 class = ata_dev_classify(&tf);
616
617 if (class == ATA_DEV_UNKNOWN)
618 return ATA_DEV_NONE;
619 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
620 return ATA_DEV_NONE;
621 return class;
622 }
623
624 /**
625 * ata_id_string - Convert IDENTIFY DEVICE page into string
626 * @id: IDENTIFY DEVICE results we will examine
627 * @s: string into which data is output
628 * @ofs: offset into identify device page
629 * @len: length of string to return. must be an even number.
630 *
631 * The strings in the IDENTIFY DEVICE page are broken up into
632 * 16-bit chunks. Run through the string, and output each
633 * 8-bit chunk linearly, regardless of platform.
634 *
635 * LOCKING:
636 * caller.
637 */
638
639 void ata_id_string(const u16 *id, unsigned char *s,
640 unsigned int ofs, unsigned int len)
641 {
642 unsigned int c;
643
644 while (len > 0) {
645 c = id[ofs] >> 8;
646 *s = c;
647 s++;
648
649 c = id[ofs] & 0xff;
650 *s = c;
651 s++;
652
653 ofs++;
654 len -= 2;
655 }
656 }
657
658 /**
659 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
660 * @id: IDENTIFY DEVICE results we will examine
661 * @s: string into which data is output
662 * @ofs: offset into identify device page
663 * @len: length of string to return. must be an odd number.
664 *
665 * This function is identical to ata_id_string except that it
666 * trims trailing spaces and terminates the resulting string with
667 * null. @len must be actual maximum length (even number) + 1.
668 *
669 * LOCKING:
670 * caller.
671 */
672 void ata_id_c_string(const u16 *id, unsigned char *s,
673 unsigned int ofs, unsigned int len)
674 {
675 unsigned char *p;
676
677 WARN_ON(!(len & 1));
678
679 ata_id_string(id, s, ofs, len - 1);
680
681 p = s + strnlen(s, len - 1);
682 while (p > s && p[-1] == ' ')
683 p--;
684 *p = '\0';
685 }
686
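/*
 * Example (illustrative sketch): extracting the model string from
 * IDENTIFY data.  Per the ATA spec the model number occupies words
 * 27-46 (40 characters), so an odd-sized 41-byte buffer leaves room
 * for the terminating NUL required by ata_id_c_string():
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(dev->id, model, 27, sizeof(model));
 */
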
687 static u64 ata_id_n_sectors(const u16 *id)
688 {
689 if (ata_id_has_lba(id)) {
690 if (ata_id_has_lba48(id))
691 return ata_id_u64(id, 100);
692 else
693 return ata_id_u32(id, 60);
694 } else {
695 if (ata_id_current_chs_valid(id))
696 return ata_id_u32(id, 57);
697 else
698 return id[1] * id[3] * id[6];
699 }
700 }
701
702 /**
703 * ata_noop_dev_select - Select device 0/1 on ATA bus
704 * @ap: ATA channel to manipulate
705 * @device: ATA device (numbered from zero) to select
706 *
707 * This function intentionally does nothing.
708 *
709 * May be used as the dev_select() entry in ata_port_operations.
710 *
711 * LOCKING:
712 * caller.
713 */
714 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
715 {
716 }
717
718
719 /**
720 * ata_std_dev_select - Select device 0/1 on ATA bus
721 * @ap: ATA channel to manipulate
722 * @device: ATA device (numbered from zero) to select
723 *
724 * Use the method defined in the ATA specification to
725 * make either device 0, or device 1, active on the
726 * ATA channel. Works with both PIO and MMIO.
727 *
728 * May be used as the dev_select() entry in ata_port_operations.
729 *
730 * LOCKING:
731 * caller.
732 */
733
734 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
735 {
736 u8 tmp;
737
738 if (device == 0)
739 tmp = ATA_DEVICE_OBS;
740 else
741 tmp = ATA_DEVICE_OBS | ATA_DEV1;
742
743 if (ap->flags & ATA_FLAG_MMIO) {
744 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
745 } else {
746 outb(tmp, ap->ioaddr.device_addr);
747 }
748 ata_pause(ap); /* needed; also flushes, for mmio */
749 }
750
751 /**
752 * ata_dev_select - Select device 0/1 on ATA bus
753 * @ap: ATA channel to manipulate
754 * @device: ATA device (numbered from zero) to select
755 * @wait: non-zero to wait for Status register BSY bit to clear
756 * @can_sleep: non-zero if context allows sleeping
757 *
758 * Use the method defined in the ATA specification to
759 * make either device 0, or device 1, active on the
760 * ATA channel.
761 *
762 * This is a high-level version of ata_std_dev_select(),
763 * which additionally provides the services of inserting
764 * the proper pauses and status polling, where needed.
765 *
766 * LOCKING:
767 * caller.
768 */
769
770 void ata_dev_select(struct ata_port *ap, unsigned int device,
771 unsigned int wait, unsigned int can_sleep)
772 {
773 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
774 ap->id, device, wait);
775
776 if (wait)
777 ata_wait_idle(ap);
778
779 ap->ops->dev_select(ap, device);
780
781 if (wait) {
782 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
783 msleep(150);
784 ata_wait_idle(ap);
785 }
786 }
787
788 /**
789 * ata_dump_id - IDENTIFY DEVICE info debugging output
790 * @id: IDENTIFY DEVICE page to dump
791 *
792 * Dump selected 16-bit words from the given IDENTIFY DEVICE
793 * page.
794 *
795 * LOCKING:
796 * caller.
797 */
798
799 static inline void ata_dump_id(const u16 *id)
800 {
801 DPRINTK("49==0x%04x "
802 "53==0x%04x "
803 "63==0x%04x "
804 "64==0x%04x "
805 "75==0x%04x \n",
806 id[49],
807 id[53],
808 id[63],
809 id[64],
810 id[75]);
811 DPRINTK("80==0x%04x "
812 "81==0x%04x "
813 "82==0x%04x "
814 "83==0x%04x "
815 "84==0x%04x \n",
816 id[80],
817 id[81],
818 id[82],
819 id[83],
820 id[84]);
821 DPRINTK("88==0x%04x "
822 "93==0x%04x\n",
823 id[88],
824 id[93]);
825 }
826
827 /**
828 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
829 * @id: IDENTIFY data to compute xfer mask from
830 *
831 * Compute the xfermask for this device. This is not as trivial
832 * as it seems if we must consider early devices correctly.
833 *
834 * FIXME: pre IDE drive timing (do we care ?).
835 *
836 * LOCKING:
837 * None.
838 *
839 * RETURNS:
840 * Computed xfermask
841 */
842 static unsigned int ata_id_xfermask(const u16 *id)
843 {
844 unsigned int pio_mask, mwdma_mask, udma_mask;
845
846 /* Usual case. Word 53 indicates word 64 is valid */
847 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
848 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
849 pio_mask <<= 3;
850 pio_mask |= 0x7;
851 } else {
852 /* If word 64 isn't valid then Word 51 high byte holds
853 * the PIO timing number for the maximum. Turn it into
854 * a mask.
855 */
856 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
857
858 /* But wait.. there's more. Design your standards by
859 * committee and you too can get a free iordy field to
860 * process. However its the speeds not the modes that
861 * are supported... Note drivers using the timing API
862 * will get this right anyway
863 */
864 }
865
866 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
867
868 udma_mask = 0;
869 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
870 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
871
872 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
873 }
874
875 /**
876 * ata_port_queue_task - Queue port_task
877 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data value passed to workqueue function
 * @delay: delay time for workqueue function
878 *
879 * Schedule @fn(@data) for execution after @delay jiffies using
880 * port_task. There is one port_task per port and it's the
881 * user(low level driver)'s responsibility to make sure that only
882 * one task is active at any given time.
883 *
884 * libata core layer takes care of synchronization between
885 * port_task and EH. ata_port_queue_task() may be ignored for EH
886 * synchronization.
887 *
888 * LOCKING:
889 * Inherited from caller.
890 */
891 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
892 unsigned long delay)
893 {
894 int rc;
895
896 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
897 return;
898
899 PREPARE_WORK(&ap->port_task, fn, data);
900
901 if (!delay)
902 rc = queue_work(ata_wq, &ap->port_task);
903 else
904 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
905
906 /* rc == 0 means that another user is using port task */
907 WARN_ON(rc == 0);
908 }
909
910 /**
911 * ata_port_flush_task - Flush port_task
912 * @ap: The ata_port to flush port_task for
913 *
914 * After this function completes, port_task is guaranteed not to
915 * be running or scheduled.
916 *
917 * LOCKING:
918 * Kernel thread context (may sleep)
919 */
920 void ata_port_flush_task(struct ata_port *ap)
921 {
922 unsigned long flags;
923
924 DPRINTK("ENTER\n");
925
926 spin_lock_irqsave(&ap->host_set->lock, flags);
927 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
928 spin_unlock_irqrestore(&ap->host_set->lock, flags);
929
930 DPRINTK("flush #1\n");
931 flush_workqueue(ata_wq);
932
933 /*
934 * At this point, if a task is running, it's guaranteed to see
935 * the FLUSH flag; thus, it will never queue pio tasks again.
936 * Cancel and flush.
937 */
938 if (!cancel_delayed_work(&ap->port_task)) {
939 DPRINTK("flush #2\n");
940 flush_workqueue(ata_wq);
941 }
942
943 spin_lock_irqsave(&ap->host_set->lock, flags);
944 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
945 spin_unlock_irqrestore(&ap->host_set->lock, flags);
946
947 DPRINTK("EXIT\n");
948 }
949
950 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
951 {
952 struct completion *waiting = qc->private_data;
953
954 qc->ap->ops->tf_read(qc->ap, &qc->tf);
955 complete(waiting);
956 }
957
958 /**
959 * ata_exec_internal - execute libata internal command
960 * @ap: Port to which the command is sent
961 * @dev: Device to which the command is sent
962 * @tf: Taskfile registers for the command and the result
963 * @cdb: CDB for packet command
964 * @dma_dir: Data transfer direction of the command
965 * @buf: Data buffer of the command
966 * @buflen: Length of data buffer
967 *
968 * Executes libata internal command with timeout. @tf contains
969 * command on entry and result on return. Timeout and error
970 * conditions are reported via return value. No recovery action
971 * is taken after a command times out. It's caller's duty to
972 * clean up after timeout.
973 *
974 * LOCKING:
975 * None. Should be called with kernel context, might sleep.
976 */
977
978 unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
979 struct ata_taskfile *tf, const u8 *cdb,
980 int dma_dir, void *buf, unsigned int buflen)
981 {
982 u8 command = tf->command;
983 struct ata_queued_cmd *qc;
984 DECLARE_COMPLETION(wait);
985 unsigned long flags;
986 unsigned int err_mask;
987
988 spin_lock_irqsave(&ap->host_set->lock, flags);
989
990 qc = ata_qc_new_init(ap, dev);
991 BUG_ON(qc == NULL);
992
993 qc->tf = *tf;
994 if (cdb)
995 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
996 qc->dma_dir = dma_dir;
997 if (dma_dir != DMA_NONE) {
998 ata_sg_init_one(qc, buf, buflen);
999 qc->nsect = buflen / ATA_SECT_SIZE;
1000 }
1001
1002 qc->private_data = &wait;
1003 qc->complete_fn = ata_qc_complete_internal;
1004
1005 ata_qc_issue(qc);
1006
1007 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1008
1009 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
1010 ata_port_flush_task(ap);
1011
1012 spin_lock_irqsave(&ap->host_set->lock, flags);
1013
1014 /* We're racing with irq here. If we lose, the
1015 * following test prevents us from completing the qc
1016 * again. If completion irq occurs after here but
1017 * before the caller cleans up, it will result in a
1018 * spurious interrupt. We can live with that.
1019 */
1020 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1021 qc->err_mask = AC_ERR_TIMEOUT;
1022 ata_qc_complete(qc);
1023 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1024 ap->id, command);
1025 }
1026
1027 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1028 }
1029
1030 *tf = qc->tf;
1031 err_mask = qc->err_mask;
1032
1033 ata_qc_free(qc);
1034
1035 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1036 * Until those drivers are fixed, we detect the condition
1037 * here, fail the command with AC_ERR_SYSTEM and reenable the
1038 * port.
1039 *
1040 * Note that this doesn't change any behavior as internal
1041 * command failure results in disabling the device in the
1042 * higher layer for LLDDs without new reset/EH callbacks.
1043 *
1044 * Kill the following code as soon as those drivers are fixed.
1045 */
1046 if (ap->flags & ATA_FLAG_DISABLED) {
1047 err_mask |= AC_ERR_SYSTEM;
1048 ata_port_probe(ap);
1049 }
1050
1051 return err_mask;
1052 }
1053
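/*
 * Example (illustrative sketch): a typical non-data use of
 * ata_exec_internal() is SET FEATURES - XFER MODE, roughly what
 * ata_dev_set_xfermode(), declared near the top of this file, does:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(ap, &tf, dev->devno);
 *	tf.command = ATA_CMD_SET_FEATURES;
 *	tf.feature = SETFEATURES_XFER;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	tf.protocol = ATA_PROT_NODATA;
 *	tf.nsect = dev->xfer_mode;
 *
 *	err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
 */
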
1054 /**
1055 * ata_pio_need_iordy - check if iordy needed
1056 * @adev: ATA device
1057 *
1058 * Check if the current speed of the device requires IORDY. Used
1059 * by various controllers for chip configuration.
1060 */
1061
1062 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1063 {
1064 int pio;
1065 int speed = adev->pio_mode - XFER_PIO_0;
1066
1067 if (speed < 2)
1068 return 0;
1069 if (speed > 2)
1070 return 1;
1071
1072 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1073
1074 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1075 pio = adev->id[ATA_ID_EIDE_PIO];
1076 /* Is the speed faster than the drive allows non IORDY ? */
1077 if (pio) {
1078 /* This is cycle times not frequency - watch the logic! */
1079 if (pio > 240) /* PIO2 is 240nS per cycle */
1080 return 1;
1081 return 0;
1082 }
1083 }
1084 return 0;
1085 }
1086
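/*
 * Worked example: for a drive running PIO2 (240 ns cycle) whose EIDE
 * words are valid and which reports a minimum non-IORDY cycle time of
 * 383 ns in id[ATA_ID_EIDE_PIO], 383 > 240 so IORDY is needed and the
 * function returns 1; a drive reporting 240 ns or less can run PIO2
 * without IORDY and the function returns 0.  PIO3 and above always
 * need IORDY (return 1), PIO0/1 never do (return 0).
 */
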
1087 /**
1088 * ata_dev_read_id - Read ID data from the specified device
1089 * @ap: port on which target device resides
1090 * @dev: target device
1091 * @p_class: pointer to class of the target device (may be changed)
1092 * @post_reset: is this read ID post-reset?
1093 * @p_id: read IDENTIFY page (newly allocated)
1094 *
1095 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1096 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1097 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1098 * for pre-ATA4 drives.
1099 *
1100 * LOCKING:
1101 * Kernel thread context (may sleep)
1102 *
1103 * RETURNS:
1104 * 0 on success, -errno otherwise.
1105 */
1106 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1107 unsigned int *p_class, int post_reset, u16 **p_id)
1108 {
1109 unsigned int class = *p_class;
1110 struct ata_taskfile tf;
1111 unsigned int err_mask = 0;
1112 u16 *id;
1113 const char *reason;
1114 int rc;
1115
1116 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1117
1118 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1119
1120 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1121 if (id == NULL) {
1122 rc = -ENOMEM;
1123 reason = "out of memory";
1124 goto err_out;
1125 }
1126
1127 retry:
1128 ata_tf_init(ap, &tf, dev->devno);
1129
1130 switch (class) {
1131 case ATA_DEV_ATA:
1132 tf.command = ATA_CMD_ID_ATA;
1133 break;
1134 case ATA_DEV_ATAPI:
1135 tf.command = ATA_CMD_ID_ATAPI;
1136 break;
1137 default:
1138 rc = -ENODEV;
1139 reason = "unsupported class";
1140 goto err_out;
1141 }
1142
1143 tf.protocol = ATA_PROT_PIO;
1144
1145 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_FROM_DEVICE,
1146 id, sizeof(id[0]) * ATA_ID_WORDS);
1147 if (err_mask) {
1148 rc = -EIO;
1149 reason = "I/O error";
1150 goto err_out;
1151 }
1152
1153 swap_buf_le16(id, ATA_ID_WORDS);
1154
1155 /* sanity check */
1156 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
1157 rc = -EINVAL;
1158 reason = "device reports illegal type";
1159 goto err_out;
1160 }
1161
1162 if (post_reset && class == ATA_DEV_ATA) {
1163 /*
1164 * The exact sequence expected by certain pre-ATA4 drives is:
1165 * SRST RESET
1166 * IDENTIFY
1167 * INITIALIZE DEVICE PARAMETERS
1168 * anything else..
1169 * Some drives were very specific about that exact sequence.
1170 */
1171 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1172 err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
1173 if (err_mask) {
1174 rc = -EIO;
1175 reason = "INIT_DEV_PARAMS failed";
1176 goto err_out;
1177 }
1178
1179 /* current CHS translation info (id[53-58]) might be
1180 * changed. reread the identify device info.
1181 */
1182 post_reset = 0;
1183 goto retry;
1184 }
1185 }
1186
1187 *p_class = class;
1188 *p_id = id;
1189 return 0;
1190
1191 err_out:
1192 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1193 ap->id, dev->devno, reason);
1194 kfree(id);
1195 return rc;
1196 }
1197
1198 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1199 struct ata_device *dev)
1200 {
1201 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1202 }
1203
1204 /**
1205 * ata_dev_configure - Configure the specified ATA/ATAPI device
1206 * @ap: Port on which target device resides
1207 * @dev: Target device to configure
1208 * @print_info: Enable device info printout
1209 *
1210 * Configure @dev according to @dev->id. Generic and low-level
1211 * driver specific fixups are also applied.
1212 *
1213 * LOCKING:
1214 * Kernel thread context (may sleep)
1215 *
1216 * RETURNS:
1217 * 0 on success, -errno otherwise
1218 */
1219 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1220 int print_info)
1221 {
1222 const u16 *id = dev->id;
1223 unsigned int xfer_mask;
1224 int i, rc;
1225
1226 if (!ata_dev_enabled(dev)) {
1227 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1228 ap->id, dev->devno);
1229 return 0;
1230 }
1231
1232 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1233
1234 /* print device capabilities */
1235 if (print_info)
1236 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1237 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1238 ap->id, dev->devno, id[49], id[82], id[83],
1239 id[84], id[85], id[86], id[87], id[88]);
1240
1241 /* initialize to-be-configured parameters */
1242 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1243 dev->max_sectors = 0;
1244 dev->cdb_len = 0;
1245 dev->n_sectors = 0;
1246 dev->cylinders = 0;
1247 dev->heads = 0;
1248 dev->sectors = 0;
1249
1250 /*
1251 * common ATA, ATAPI feature tests
1252 */
1253
1254 /* find max transfer mode; for printk only */
1255 xfer_mask = ata_id_xfermask(id);
1256
1257 ata_dump_id(id);
1258
1259 /* ATA-specific feature tests */
1260 if (dev->class == ATA_DEV_ATA) {
1261 dev->n_sectors = ata_id_n_sectors(id);
1262
1263 if (ata_id_has_lba(id)) {
1264 const char *lba_desc;
1265
1266 lba_desc = "LBA";
1267 dev->flags |= ATA_DFLAG_LBA;
1268 if (ata_id_has_lba48(id)) {
1269 dev->flags |= ATA_DFLAG_LBA48;
1270 lba_desc = "LBA48";
1271 }
1272
1273 /* print device info to dmesg */
1274 if (print_info)
1275 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1276 "max %s, %Lu sectors: %s\n",
1277 ap->id, dev->devno,
1278 ata_id_major_version(id),
1279 ata_mode_string(xfer_mask),
1280 (unsigned long long)dev->n_sectors,
1281 lba_desc);
1282 } else {
1283 /* CHS */
1284
1285 /* Default translation */
1286 dev->cylinders = id[1];
1287 dev->heads = id[3];
1288 dev->sectors = id[6];
1289
1290 if (ata_id_current_chs_valid(id)) {
1291 /* Current CHS translation is valid. */
1292 dev->cylinders = id[54];
1293 dev->heads = id[55];
1294 dev->sectors = id[56];
1295 }
1296
1297 /* print device info to dmesg */
1298 if (print_info)
1299 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1300 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1301 ap->id, dev->devno,
1302 ata_id_major_version(id),
1303 ata_mode_string(xfer_mask),
1304 (unsigned long long)dev->n_sectors,
1305 dev->cylinders, dev->heads, dev->sectors);
1306 }
1307
1308 if (dev->id[59] & 0x100) {
1309 dev->multi_count = dev->id[59] & 0xff;
1310 DPRINTK("ata%u: dev %u multi count %u\n",
1311 ap->id, dev->devno, dev->multi_count);
1312 }
1313
1314 dev->cdb_len = 16;
1315 }
1316
1317 /* ATAPI-specific feature tests */
1318 else if (dev->class == ATA_DEV_ATAPI) {
1319 char *cdb_intr_string = "";
1320
1321 rc = atapi_cdb_len(id);
1322 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1323 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1324 rc = -EINVAL;
1325 goto err_out_nosup;
1326 }
1327 dev->cdb_len = (unsigned int) rc;
1328
1329 if (ata_id_cdb_intr(dev->id)) {
1330 dev->flags |= ATA_DFLAG_CDB_INTR;
1331 cdb_intr_string = ", CDB intr";
1332 }
1333
1334 /* print device info to dmesg */
1335 if (print_info)
1336 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s%s\n",
1337 ap->id, dev->devno, ata_mode_string(xfer_mask),
1338 cdb_intr_string);
1339 }
1340
1341 ap->host->max_cmd_len = 0;
1342 for (i = 0; i < ATA_MAX_DEVICES; i++)
1343 ap->host->max_cmd_len = max_t(unsigned int,
1344 ap->host->max_cmd_len,
1345 ap->device[i].cdb_len);
1346
1347 /* limit bridge transfers to udma5, 200 sectors */
1348 if (ata_dev_knobble(ap, dev)) {
1349 if (print_info)
1350 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1351 ap->id, dev->devno);
1352 dev->udma_mask &= ATA_UDMA5;
1353 dev->max_sectors = ATA_MAX_SECTORS;
1354 }
1355
1356 if (ap->ops->dev_config)
1357 ap->ops->dev_config(ap, dev);
1358
1359 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1360 return 0;
1361
1362 err_out_nosup:
1363 DPRINTK("EXIT, err\n");
1364 return rc;
1365 }
1366
1367 /**
1368 * ata_bus_probe - Reset and probe ATA bus
1369 * @ap: Bus to probe
1370 *
1371 * Master ATA bus probing function. Initiates a hardware-dependent
1372 * bus reset, then attempts to identify any devices found on
1373 * the bus.
1374 *
1375 * LOCKING:
1376 * PCI/etc. bus probe sem.
1377 *
1378 * RETURNS:
1379 * Zero on success, negative errno otherwise.
1380 */
1381
1382 static int ata_bus_probe(struct ata_port *ap)
1383 {
1384 unsigned int classes[ATA_MAX_DEVICES];
1385 int tries[ATA_MAX_DEVICES];
1386 int i, rc, down_xfermask;
1387 struct ata_device *dev;
1388
1389 ata_port_probe(ap);
1390
1391 for (i = 0; i < ATA_MAX_DEVICES; i++)
1392 tries[i] = ATA_PROBE_MAX_TRIES;
1393
1394 retry:
1395 down_xfermask = 0;
1396
1397 /* reset and determine device classes */
1398 for (i = 0; i < ATA_MAX_DEVICES; i++)
1399 classes[i] = ATA_DEV_UNKNOWN;
1400
1401 if (ap->ops->probe_reset) {
1402 rc = ap->ops->probe_reset(ap, classes);
1403 if (rc) {
1404 printk(KERN_ERR "ata%u: reset failed (errno=%d)\n", ap->id, rc);
1405 return rc;
1406 }
1407 } else {
1408 ap->ops->phy_reset(ap);
1409
1410 if (!(ap->flags & ATA_FLAG_DISABLED))
1411 for (i = 0; i < ATA_MAX_DEVICES; i++)
1412 classes[i] = ap->device[i].class;
1413
1414 ata_port_probe(ap);
1415 }
1416
1417 for (i = 0; i < ATA_MAX_DEVICES; i++)
1418 if (classes[i] == ATA_DEV_UNKNOWN)
1419 classes[i] = ATA_DEV_NONE;
1420
1421 /* read IDENTIFY page and configure devices */
1422 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1423 dev = &ap->device[i];
1424 dev->class = classes[i];
1425
1426 if (!tries[i]) {
1427 ata_down_xfermask_limit(ap, dev, 1);
1428 ata_dev_disable(ap, dev);
1429 }
1430
1431 if (!ata_dev_enabled(dev))
1432 continue;
1433
1434 kfree(dev->id);
1435 dev->id = NULL;
1436 rc = ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id);
1437 if (rc)
1438 goto fail;
1439
1440 rc = ata_dev_configure(ap, dev, 1);
1441 if (rc)
1442 goto fail;
1443 }
1444
1445 /* configure transfer mode */
1446 if (ap->ops->set_mode) {
1447 /* FIXME: make ->set_mode handle no device case and
1448 * return error code and failing device on failure as
1449 * ata_set_mode() does.
1450 */
1451 for (i = 0; i < ATA_MAX_DEVICES; i++)
1452 if (ata_dev_enabled(&ap->device[i])) {
1453 ap->ops->set_mode(ap);
1454 break;
1455 }
1456 rc = 0;
1457 } else {
1458 rc = ata_set_mode(ap, &dev);
1459 if (rc) {
1460 down_xfermask = 1;
1461 goto fail;
1462 }
1463 }
1464
1465 for (i = 0; i < ATA_MAX_DEVICES; i++)
1466 if (ata_dev_enabled(&ap->device[i]))
1467 return 0;
1468
1469 /* no device present, disable port */
1470 ata_port_disable(ap);
1471 ap->ops->port_disable(ap);
1472 return -ENODEV;
1473
1474 fail:
1475 switch (rc) {
1476 case -EINVAL:
1477 case -ENODEV:
1478 tries[dev->devno] = 0;
1479 break;
1480 case -EIO:
1481 ata_down_sata_spd_limit(ap);
1482 /* fall through */
1483 default:
1484 tries[dev->devno]--;
1485 if (down_xfermask &&
1486 ata_down_xfermask_limit(ap, dev, tries[dev->devno] == 1))
1487 tries[dev->devno] = 0;
1488 }
1489
1490 goto retry;
1491 }
1492
1493 /**
1494 * ata_port_probe - Mark port as enabled
1495 * @ap: Port for which we indicate enablement
1496 *
1497 * Modify @ap data structure such that the system
1498 * thinks that the entire port is enabled.
1499 *
1500 * LOCKING: host_set lock, or some other form of
1501 * serialization.
1502 */
1503
1504 void ata_port_probe(struct ata_port *ap)
1505 {
1506 ap->flags &= ~ATA_FLAG_DISABLED;
1507 }
1508
1509 /**
1510 * sata_print_link_status - Print SATA link status
1511 * @ap: SATA port to printk link status about
1512 *
1513 * This function prints link speed and status of a SATA link.
1514 *
1515 * LOCKING:
1516 * None.
1517 */
1518 static void sata_print_link_status(struct ata_port *ap)
1519 {
1520 u32 sstatus, scontrol, tmp;
1521
1522 if (!ap->ops->scr_read)
1523 return;
1524
1525 sstatus = scr_read(ap, SCR_STATUS);
1526 scontrol = scr_read(ap, SCR_CONTROL);
1527
1528 if (sata_dev_present(ap)) {
1529 tmp = (sstatus >> 4) & 0xf;
1530 printk(KERN_INFO
1531 "ata%u: SATA link up %s (SStatus %X SControl %X)\n",
1532 ap->id, sata_spd_string(tmp), sstatus, scontrol);
1533 } else {
1534 printk(KERN_INFO
1535 "ata%u: SATA link down (SStatus %X SControl %X)\n",
1536 ap->id, sstatus, scontrol);
1537 }
1538 }
1539
1540 /**
1541 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1542 * @ap: SATA port associated with target SATA PHY.
1543 *
1544 * This function issues commands to standard SATA Sxxx
1545 * PHY registers, to wake up the phy (and device), and
1546 * clear any reset condition.
1547 *
1548 * LOCKING:
1549 * PCI/etc. bus probe sem.
1550 *
1551 */
1552 void __sata_phy_reset(struct ata_port *ap)
1553 {
1554 u32 sstatus;
1555 unsigned long timeout = jiffies + (HZ * 5);
1556
1557 if (ap->flags & ATA_FLAG_SATA_RESET) {
1558 /* issue phy wake/reset */
1559 scr_write_flush(ap, SCR_CONTROL, 0x301);
1560 /* Couldn't find anything in SATA I/II specs, but
1561 * AHCI-1.1 10.4.2 says at least 1 ms. */
1562 mdelay(1);
1563 }
1564 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1565
1566 /* wait for phy to become ready, if necessary */
1567 do {
1568 msleep(200);
1569 sstatus = scr_read(ap, SCR_STATUS);
1570 if ((sstatus & 0xf) != 1)
1571 break;
1572 } while (time_before(jiffies, timeout));
1573
1574 /* print link status */
1575 sata_print_link_status(ap);
1576
1577 /* TODO: phy layer with polling, timeouts, etc. */
1578 if (sata_dev_present(ap))
1579 ata_port_probe(ap);
1580 else
1581 ata_port_disable(ap);
1582
1583 if (ap->flags & ATA_FLAG_DISABLED)
1584 return;
1585
1586 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1587 ata_port_disable(ap);
1588 return;
1589 }
1590
1591 ap->cbl = ATA_CBL_SATA;
1592 }
1593
1594 /**
1595 * sata_phy_reset - Reset SATA bus.
1596 * @ap: SATA port associated with target SATA PHY.
1597 *
1598 * This function resets the SATA bus, and then probes
1599 * the bus for devices.
1600 *
1601 * LOCKING:
1602 * PCI/etc. bus probe sem.
1603 *
1604 */
1605 void sata_phy_reset(struct ata_port *ap)
1606 {
1607 __sata_phy_reset(ap);
1608 if (ap->flags & ATA_FLAG_DISABLED)
1609 return;
1610 ata_bus_reset(ap);
1611 }
1612
1613 /**
1614 * ata_dev_pair - return other device on cable
1615 * @ap: port
1616 * @adev: device
1617 *
1618 * Obtain the other device on the same cable, or NULL if no
1619 * other device is present.
1620 */
1621
1622 struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
1623 {
1624 struct ata_device *pair = &ap->device[1 - adev->devno];
1625 if (!ata_dev_enabled(pair))
1626 return NULL;
1627 return pair;
1628 }
1629
1630 /**
1631 * ata_port_disable - Disable port.
1632 * @ap: Port to be disabled.
1633 *
1634 * Modify @ap data structure such that the system
1635 * thinks that the entire port is disabled, and should
1636 * never attempt to probe or communicate with devices
1637 * on this port.
1638 *
1639 * LOCKING: host_set lock, or some other form of
1640 * serialization.
1641 */
1642
1643 void ata_port_disable(struct ata_port *ap)
1644 {
1645 ap->device[0].class = ATA_DEV_NONE;
1646 ap->device[1].class = ATA_DEV_NONE;
1647 ap->flags |= ATA_FLAG_DISABLED;
1648 }
1649
1650 /**
1651 * ata_down_sata_spd_limit - adjust SATA spd limit downward
1652 * @ap: Port to adjust SATA spd limit for
1653 *
1654 * Adjust SATA spd limit of @ap downward. Note that this
1655 * function only adjusts the limit. The change must be applied
1656 * using ata_set_sata_spd().
1657 *
1658 * LOCKING:
1659 * Inherited from caller.
1660 *
1661 * RETURNS:
1662 * 0 on success, negative errno on failure
1663 */
1664 int ata_down_sata_spd_limit(struct ata_port *ap)
1665 {
1666 u32 spd, mask;
1667 int highbit;
1668
1669 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
1670 return -EOPNOTSUPP;
1671
1672 mask = ap->sata_spd_limit;
1673 if (mask <= 1)
1674 return -EINVAL;
1675 highbit = fls(mask) - 1;
1676 mask &= ~(1 << highbit);
1677
1678 spd = (scr_read(ap, SCR_STATUS) >> 4) & 0xf;
1679 if (spd <= 1)
1680 return -EINVAL;
1681 spd--;
1682 mask &= (1 << spd) - 1;
1683 if (!mask)
1684 return -EINVAL;
1685
1686 ap->sata_spd_limit = mask;
1687
1688 printk(KERN_WARNING "ata%u: limiting SATA link speed to %s\n",
1689 ap->id, sata_spd_string(fls(mask)));
1690
1691 return 0;
1692 }
1693
1694 static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol)
1695 {
1696 u32 spd, limit;
1697
1698 if (ap->sata_spd_limit == UINT_MAX)
1699 limit = 0;
1700 else
1701 limit = fls(ap->sata_spd_limit);
1702
1703 spd = (*scontrol >> 4) & 0xf;
1704 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1705
1706 return spd != limit;
1707 }
1708
1709 /**
1710 * ata_set_sata_spd_needed - is SATA spd configuration needed
1711 * @ap: Port in question
1712 *
1713 * Test whether the spd limit in SControl matches
1714 * @ap->sata_spd_limit. This function is used to determine
1715 * whether hardreset is necessary to apply SATA spd
1716 * configuration.
1717 *
1718 * LOCKING:
1719 * Inherited from caller.
1720 *
1721 * RETURNS:
1722 * 1 if SATA spd configuration is needed, 0 otherwise.
1723 */
1724 int ata_set_sata_spd_needed(struct ata_port *ap)
1725 {
1726 u32 scontrol;
1727
1728 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
1729 return 0;
1730
1731 scontrol = scr_read(ap, SCR_CONTROL);
1732
1733 return __ata_set_sata_spd_needed(ap, &scontrol);
1734 }
1735
1736 /**
1737 * ata_set_sata_spd - set SATA spd according to spd limit
1738 * @ap: Port to set SATA spd for
1739 *
1740 * Set SATA spd of @ap according to sata_spd_limit.
1741 *
1742 * LOCKING:
1743 * Inherited from caller.
1744 *
1745 * RETURNS:
1746 * 0 if spd doesn't need to be changed, 1 if spd has been
1747 * changed. -EOPNOTSUPP if SCR registers are inaccessible.
1748 */
1749 static int ata_set_sata_spd(struct ata_port *ap)
1750 {
1751 u32 scontrol;
1752
1753 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
1754 return -EOPNOTSUPP;
1755
1756 scontrol = scr_read(ap, SCR_CONTROL);
1757 if (!__ata_set_sata_spd_needed(ap, &scontrol))
1758 return 0;
1759
1760 scr_write(ap, SCR_CONTROL, scontrol);
1761 return 1;
1762 }
1763
1764 /*
1765 * This mode timing computation functionality is ported over from
1766 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1767 */
1768 /*
1769 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1770 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1771 * for PIO 5, which is a nonstandard extension and UDMA6, which
1772 * is currently supported only by Maxtor drives.
1773 */
1774
1775 static const struct ata_timing ata_timing[] = {
1776
1777 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1778 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1779 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1780 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1781
1782 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1783 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1784 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1785
1786 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1787
1788 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1789 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1790 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1791
1792 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1793 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1794 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1795
1796 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1797 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1798 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1799
1800 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1801 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1802 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1803
1804 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1805
1806 { 0xFF }
1807 };
1808
1809 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1810 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1811
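/*
 * Worked example: ENOUGH() is a ceiling division and EZ() additionally
 * treats 0 as "not specified".  ata_timing_quantize() below multiplies
 * the nanosecond table values by 1000, so T and UT are expected in
 * picoseconds.  With a 30000 ps (33 MHz) bus clock, a 70 ns setup time
 * quantizes to ENOUGH(70 * 1000, 30000) == 3 clock cycles, while
 * EZ(0, 30000) == 0.
 */
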
1812 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1813 {
1814 q->setup = EZ(t->setup * 1000, T);
1815 q->act8b = EZ(t->act8b * 1000, T);
1816 q->rec8b = EZ(t->rec8b * 1000, T);
1817 q->cyc8b = EZ(t->cyc8b * 1000, T);
1818 q->active = EZ(t->active * 1000, T);
1819 q->recover = EZ(t->recover * 1000, T);
1820 q->cycle = EZ(t->cycle * 1000, T);
1821 q->udma = EZ(t->udma * 1000, UT);
1822 }
1823
1824 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1825 struct ata_timing *m, unsigned int what)
1826 {
1827 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1828 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1829 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1830 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1831 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1832 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1833 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1834 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1835 }
1836
1837 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1838 {
1839 const struct ata_timing *t;
1840
1841 for (t = ata_timing; t->mode != speed; t++)
1842 if (t->mode == 0xFF)
1843 return NULL;
1844 return t;
1845 }
1846
1847 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1848 struct ata_timing *t, int T, int UT)
1849 {
1850 const struct ata_timing *s;
1851 struct ata_timing p;
1852
1853 /*
1854 * Find the mode.
1855 */
1856
1857 if (!(s = ata_timing_find_mode(speed)))
1858 return -EINVAL;
1859
1860 memcpy(t, s, sizeof(*s));
1861
1862 /*
1863 * If the drive is an EIDE drive, it can tell us it needs extended
1864 * PIO/MW_DMA cycle timing.
1865 */
1866
1867 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1868 memset(&p, 0, sizeof(p));
1869 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1870 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1871 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1872 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1873 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1874 }
1875 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1876 }
1877
1878 /*
1879 * Convert the timing to bus clock counts.
1880 */
1881
1882 ata_timing_quantize(t, t, T, UT);
1883
1884 /*
1885 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1886 * S.M.A.R.T. and some other commands. We have to ensure that the
1887 * DMA cycle timing is no faster than the fastest PIO timing.
1888 */
1889
1890 if (speed > XFER_PIO_4) {
1891 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1892 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1893 }
1894
1895 /*
1896 * Lengthen active & recovery time so that cycle time is correct.
1897 */
1898
1899 if (t->act8b + t->rec8b < t->cyc8b) {
1900 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1901 t->rec8b = t->cyc8b - t->act8b;
1902 }
1903
1904 if (t->active + t->recover < t->cycle) {
1905 t->active += (t->cycle - (t->active + t->recover)) / 2;
1906 t->recover = t->cycle - t->active;
1907 }
1908
1909 return 0;
1910 }
1911
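/*
 * Example (illustrative sketch): a PATA controller driver would
 * typically call ata_timing_compute() from its ->set_piomode() or
 * ->set_dmamode() hook and translate the resulting clock counts into
 * register values.  This assumes a 33 MHz (30000 ps) bus clock; the
 * UDMA clock argument only matters for UDMA modes:
 *
 *	struct ata_timing t;
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, 30000, 30000)) {
 *		// program t.setup, t.active and t.recover into the chip
 *	}
 */
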
1912 /**
1913 * ata_down_xfermask_limit - adjust dev xfer masks downward
1914 * @ap: Port associated with device @dev
1915 * @dev: Device to adjust xfer masks
1916 * @force_pio0: Force PIO0
1917 *
1918 * Adjust xfer masks of @dev downward. Note that this function
1919 * does not apply the change. Invoking ata_set_mode() afterwards
1920 * will apply the limit.
1921 *
1922 * LOCKING:
1923 * Inherited from caller.
1924 *
1925 * RETURNS:
1926 * 0 on success, negative errno on failure
1927 */
1928 int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
1929 int force_pio0)
1930 {
1931 unsigned long xfer_mask;
1932 int highbit;
1933
1934 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
1935 dev->udma_mask);
1936
1937 if (!xfer_mask)
1938 goto fail;
1939 /* don't gear down to MWDMA from UDMA, go directly to PIO */
1940 if (xfer_mask & ATA_MASK_UDMA)
1941 xfer_mask &= ~ATA_MASK_MWDMA;
1942
1943 highbit = fls(xfer_mask) - 1;
1944 xfer_mask &= ~(1 << highbit);
1945 if (force_pio0)
1946 xfer_mask &= 1 << ATA_SHIFT_PIO;
1947 if (!xfer_mask)
1948 goto fail;
1949
1950 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
1951 &dev->udma_mask);
1952
1953 printk(KERN_WARNING "ata%u: dev %u limiting speed to %s\n",
1954 ap->id, dev->devno, ata_mode_string(xfer_mask));
1955
1956 return 0;
1957
1958 fail:
1959 return -EINVAL;
1960 }
1961
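/*
 * Worked example: suppose a device currently advertises UDMA0-5.
 * Because UDMA bits are set, the MWDMA bits are cleared first, then
 * the highest remaining bit (UDMA5) is dropped, leaving UDMA0-4 plus
 * the untouched PIO bits.  Repeated calls therefore step down through
 * the UDMA modes and, once those are exhausted, start dropping the
 * highest remaining PIO mode; with @force_pio0 set, everything above
 * PIO0 is masked off in one step.
 */
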
1962 static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1963 {
1964 unsigned int err_mask;
1965 int rc;
1966
1967 dev->flags &= ~ATA_DFLAG_PIO;
1968 if (dev->xfer_shift == ATA_SHIFT_PIO)
1969 dev->flags |= ATA_DFLAG_PIO;
1970
1971 err_mask = ata_dev_set_xfermode(ap, dev);
1972 if (err_mask) {
1973 printk(KERN_ERR
1974 "ata%u: failed to set xfermode (err_mask=0x%x)\n",
1975 ap->id, err_mask);
1976 return -EIO;
1977 }
1978
1979 rc = ata_dev_revalidate(ap, dev, 0);
1980 if (rc)
1981 return rc;
1982
1983 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1984 dev->xfer_shift, (int)dev->xfer_mode);
1985
1986 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1987 ap->id, dev->devno,
1988 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1989 return 0;
1990 }
1991
1992 /**
1993 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1994 * @ap: port on which timings will be programmed
1995 * @r_failed_dev: out parameter for failed device
1996 *
1997 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
1998 * ata_set_mode() fails, pointer to the failing device is
1999 * returned in @r_failed_dev.
2000 *
2001 * LOCKING:
2002 * PCI/etc. bus probe sem.
2003 *
2004 * RETURNS:
2005 * 0 on success, negative errno otherwise
2006 */
2007 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2008 {
2009 struct ata_device *dev;
2010 int i, rc = 0, used_dma = 0, found = 0;
2011
2012 /* step 1: calculate xfer_mask */
2013 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2014 unsigned int pio_mask, dma_mask;
2015
2016 dev = &ap->device[i];
2017
2018 if (!ata_dev_enabled(dev))
2019 continue;
2020
2021 ata_dev_xfermask(ap, dev);
2022
2023 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2024 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2025 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2026 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2027
2028 found = 1;
2029 if (dev->dma_mode)
2030 used_dma = 1;
2031 }
2032 if (!found)
2033 goto out;
2034
2035 /* step 2: always set host PIO timings */
2036 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2037 dev = &ap->device[i];
2038 if (!ata_dev_enabled(dev))
2039 continue;
2040
2041 if (!dev->pio_mode) {
2042 printk(KERN_WARNING "ata%u: dev %u no PIO support\n",
2043 ap->id, dev->devno);
2044 rc = -EINVAL;
2045 goto out;
2046 }
2047
2048 dev->xfer_mode = dev->pio_mode;
2049 dev->xfer_shift = ATA_SHIFT_PIO;
2050 if (ap->ops->set_piomode)
2051 ap->ops->set_piomode(ap, dev);
2052 }
2053
2054 /* step 3: set host DMA timings */
2055 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2056 dev = &ap->device[i];
2057
2058 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2059 continue;
2060
2061 dev->xfer_mode = dev->dma_mode;
2062 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2063 if (ap->ops->set_dmamode)
2064 ap->ops->set_dmamode(ap, dev);
2065 }
2066
2067 /* step 4: update devices' xfer mode */
2068 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2069 dev = &ap->device[i];
2070
2071 if (!ata_dev_enabled(dev))
2072 continue;
2073
2074 rc = ata_dev_set_mode(ap, dev);
2075 if (rc)
2076 goto out;
2077 }
2078
2079 /* Record simplex status. If we selected DMA then the other
2080 * host channels are not permitted to do so.
2081 */
2082 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
2083 ap->host_set->simplex_claimed = 1;
2084
2085 /* step 5: chip-specific finalisation */
2086 if (ap->ops->post_set_mode)
2087 ap->ops->post_set_mode(ap);
2088
2089 out:
2090 if (rc)
2091 *r_failed_dev = dev;
2092 return rc;
2093 }
2094
2095 /**
2096 * ata_tf_to_host - issue ATA taskfile to host controller
2097 * @ap: port to which command is being issued
2098 * @tf: ATA taskfile register set
2099 *
2100 * Issues ATA taskfile register set to ATA host controller,
2101 * with proper synchronization with interrupt handler and
2102 * other threads.
2103 *
2104 * LOCKING:
2105 * spin_lock_irqsave(host_set lock)
2106 */
2107
2108 static inline void ata_tf_to_host(struct ata_port *ap,
2109 const struct ata_taskfile *tf)
2110 {
2111 ap->ops->tf_load(ap, tf);
2112 ap->ops->exec_command(ap, tf);
2113 }
2114
2115 /**
2116 * ata_busy_sleep - sleep until BSY clears, or timeout
2117 * @ap: port containing status register to be polled
2118 * @tmout_pat: impatience timeout
2119 * @tmout: overall timeout
2120 *
2121 * Sleep until ATA Status register bit BSY clears,
2122 * or a timeout occurs.
2123 *
2124 * LOCKING: None.
2125 */
2126
2127 unsigned int ata_busy_sleep (struct ata_port *ap,
2128 unsigned long tmout_pat, unsigned long tmout)
2129 {
2130 unsigned long timer_start, timeout;
2131 u8 status;
2132
2133 status = ata_busy_wait(ap, ATA_BUSY, 300);
2134 timer_start = jiffies;
2135 timeout = timer_start + tmout_pat;
2136 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2137 msleep(50);
2138 status = ata_busy_wait(ap, ATA_BUSY, 3);
2139 }
2140
2141 if (status & ATA_BUSY)
2142 printk(KERN_WARNING "ata%u is slow to respond, "
2143 "please be patient\n", ap->id);
2144
2145 timeout = timer_start + tmout;
2146 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2147 msleep(50);
2148 status = ata_chk_status(ap);
2149 }
2150
2151 if (status & ATA_BUSY) {
2152 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
2153 ap->id, tmout / HZ);
2154 return 1;
2155 }
2156
2157 return 0;
2158 }
2159
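/**
 * ata_bus_post_reset - wait for devices to settle after a bus reset
 * @ap: port that was just reset
 * @devmask: mask of devices found by ata_devchk() before the reset
 *
 * Wait for BSY to clear on device 0, wait for device 1 to answer
 * register reads and then for its BSY bit to clear, and finally
 * re-select the devices that were found.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */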
2160 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2161 {
2162 struct ata_ioports *ioaddr = &ap->ioaddr;
2163 unsigned int dev0 = devmask & (1 << 0);
2164 unsigned int dev1 = devmask & (1 << 1);
2165 unsigned long timeout;
2166
2167 /* if device 0 was found in ata_devchk, wait for its
2168 * BSY bit to clear
2169 */
2170 if (dev0)
2171 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2172
2173 /* if device 1 was found in ata_devchk, wait for
2174 * register access, then wait for BSY to clear
2175 */
2176 timeout = jiffies + ATA_TMOUT_BOOT;
2177 while (dev1) {
2178 u8 nsect, lbal;
2179
2180 ap->ops->dev_select(ap, 1);
2181 if (ap->flags & ATA_FLAG_MMIO) {
2182 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2183 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2184 } else {
2185 nsect = inb(ioaddr->nsect_addr);
2186 lbal = inb(ioaddr->lbal_addr);
2187 }
2188 if ((nsect == 1) && (lbal == 1))
2189 break;
2190 if (time_after(jiffies, timeout)) {
2191 dev1 = 0;
2192 break;
2193 }
2194 msleep(50); /* give drive a breather */
2195 }
2196 if (dev1)
2197 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2198
2199 /* is all this really necessary? */
2200 ap->ops->dev_select(ap, 0);
2201 if (dev1)
2202 ap->ops->dev_select(ap, 1);
2203 if (dev0)
2204 ap->ops->dev_select(ap, 0);
2205 }
2206
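/**
 * ata_bus_softreset - reset the ATA bus via SRST
 * @ap: port to reset
 * @devmask: mask of devices expected to be present after the reset
 *
 * Pulse SRST in the device control register, wait the spec-mandated
 * settle time, check that the bus is not floating at 0xFF and then
 * wait for the devices in @devmask to become ready.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */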
2207 static unsigned int ata_bus_softreset(struct ata_port *ap,
2208 unsigned int devmask)
2209 {
2210 struct ata_ioports *ioaddr = &ap->ioaddr;
2211
2212 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2213
2214 /* software reset. causes dev0 to be selected */
2215 if (ap->flags & ATA_FLAG_MMIO) {
2216 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2217 udelay(20); /* FIXME: flush */
2218 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2219 udelay(20); /* FIXME: flush */
2220 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2221 } else {
2222 outb(ap->ctl, ioaddr->ctl_addr);
2223 udelay(10);
2224 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2225 udelay(10);
2226 outb(ap->ctl, ioaddr->ctl_addr);
2227 }
2228
2229 /* spec mandates ">= 2ms" before checking status.
2230 * We wait 150ms, because that was the magic delay used for
2231 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2232 * between when the ATA command register is written, and then
2233 * status is checked. Because waiting for "a while" before
2234 * checking status is fine, post SRST, we perform this magic
2235 * delay here as well.
2236 *
2237 * Old drivers/ide uses the 2 ms rule and then waits for ready
2238 */
2239 msleep(150);
2240
2241 /* Before we perform post reset processing we want to see if
2242 * the bus shows 0xFF because the odd clown forgets the D7
2243 * pulldown resistor.
2244 */
2245 if (ata_check_status(ap) == 0xFF)
2246 return AC_ERR_OTHER;
2247
2248 ata_bus_post_reset(ap, devmask);
2249
2250 return 0;
2251 }
2252
2253 /**
2254 * ata_bus_reset - reset host port and associated ATA channel
2255 * @ap: port to reset
2256 *
2257 * This is typically the first time we actually start issuing
2258 * commands to the ATA channel. We wait for BSY to clear, then
2259 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2260 * result. Determine what devices, if any, are on the channel
2261 * by looking at the device 0/1 error register. Look at the signature
2262 * stored in each device's taskfile registers, to determine if
2263 * the device is ATA or ATAPI.
2264 *
2265 * LOCKING:
2266 * PCI/etc. bus probe sem.
2267 * Obtains host_set lock.
2268 *
2269 * SIDE EFFECTS:
2270 * Sets ATA_FLAG_DISABLED if bus reset fails.
2271 */
2272
2273 void ata_bus_reset(struct ata_port *ap)
2274 {
2275 struct ata_ioports *ioaddr = &ap->ioaddr;
2276 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2277 u8 err;
2278 unsigned int dev0, dev1 = 0, devmask = 0;
2279
2280 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2281
2282 /* determine if device 0/1 are present */
2283 if (ap->flags & ATA_FLAG_SATA_RESET)
2284 dev0 = 1;
2285 else {
2286 dev0 = ata_devchk(ap, 0);
2287 if (slave_possible)
2288 dev1 = ata_devchk(ap, 1);
2289 }
2290
2291 if (dev0)
2292 devmask |= (1 << 0);
2293 if (dev1)
2294 devmask |= (1 << 1);
2295
2296 /* select device 0 again */
2297 ap->ops->dev_select(ap, 0);
2298
2299 /* issue bus reset */
2300 if (ap->flags & ATA_FLAG_SRST)
2301 if (ata_bus_softreset(ap, devmask))
2302 goto err_out;
2303
2304 /*
2305 * determine by signature whether we have ATA or ATAPI devices
2306 */
2307 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2308 if ((slave_possible) && (err != 0x81))
2309 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2310
2311 /* re-enable interrupts */
2312 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2313 ata_irq_on(ap);
2314
2315 /* is double-select really necessary? */
2316 if (ap->device[1].class != ATA_DEV_NONE)
2317 ap->ops->dev_select(ap, 1);
2318 if (ap->device[0].class != ATA_DEV_NONE)
2319 ap->ops->dev_select(ap, 0);
2320
2321 /* if no devices were detected, disable this port */
2322 if ((ap->device[0].class == ATA_DEV_NONE) &&
2323 (ap->device[1].class == ATA_DEV_NONE))
2324 goto err_out;
2325
2326 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2327 /* set up device control for ATA_FLAG_SATA_RESET */
2328 if (ap->flags & ATA_FLAG_MMIO)
2329 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2330 else
2331 outb(ap->ctl, ioaddr->ctl_addr);
2332 }
2333
2334 DPRINTK("EXIT\n");
2335 return;
2336
2337 err_out:
2338 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2339 ap->ops->port_disable(ap);
2340
2341 DPRINTK("EXIT\n");
2342 }
2343
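/**
 * sata_phy_resume - bring the SATA phy back online
 * @ap: port whose phy is to be resumed
 *
 * Clear DET in SControl (preserving the configured speed limit and
 * disabling partial/slumber power-management transitions), then poll
 * SStatus until the phy leaves the intermediate DET=1 state (device
 * detected but communication not yet established).
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 when the phy settles, -1 if it is still in DET=1 after 5 seconds.
 */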
2344 static int sata_phy_resume(struct ata_port *ap)
2345 {
2346 unsigned long timeout = jiffies + (HZ * 5);
2347 u32 scontrol, sstatus;
2348
2349 scontrol = scr_read(ap, SCR_CONTROL);
2350 scontrol = (scontrol & 0x0f0) | 0x300;
2351 scr_write_flush(ap, SCR_CONTROL, scontrol);
2352
2353 /* Wait for phy to become ready, if necessary. */
2354 do {
2355 msleep(200);
2356 sstatus = scr_read(ap, SCR_STATUS);
2357 if ((sstatus & 0xf) != 1)
2358 return 0;
2359 } while (time_before(jiffies, timeout));
2360
2361 return -1;
2362 }
2363
2364 /**
2365 * ata_std_probeinit - initialize probing
2366 * @ap: port to be probed
2367 *
2368 * @ap is about to be probed. Initialize it. This function is
2369 * to be used as standard callback for ata_drive_probe_reset().
2370 *
2371 * NOTE!!! Do not use this function as probeinit if a low level
2372 * driver implements only hardreset. Just pass NULL as probeinit
2373 * in that case. Using this function is probably okay but doing
2374 * so makes the reset sequence different from the original
2375 * ->phy_reset implementation and Jeff nervous. :-P
2376 */
2377 void ata_std_probeinit(struct ata_port *ap)
2378 {
2379 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
2380 u32 spd;
2381
2382 sata_phy_resume(ap);
2383
2384 spd = (scr_read(ap, SCR_CONTROL) & 0xf0) >> 4;
2385 if (spd)
2386 ap->sata_spd_limit &= (1 << spd) - 1;
2387
2388 if (sata_dev_present(ap))
2389 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2390 }
2391 }
2392
2393 /**
2394 * ata_std_softreset - reset host port via ATA SRST
2395 * @ap: port to reset
2396 * @verbose: fail verbosely
2397 * @classes: resulting classes of attached devices
2398 *
2399 * Reset host port using ATA SRST. This function is to be used
2400 * as standard callback for ata_drive_*_reset() functions.
2401 *
2402 * LOCKING:
2403 * Kernel thread context (may sleep)
2404 *
2405 * RETURNS:
2406 * 0 on success, -errno otherwise.
2407 */
2408 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2409 {
2410 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2411 unsigned int devmask = 0, err_mask;
2412 u8 err;
2413
2414 DPRINTK("ENTER\n");
2415
2416 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2417 classes[0] = ATA_DEV_NONE;
2418 goto out;
2419 }
2420
2421 /* determine if device 0/1 are present */
2422 if (ata_devchk(ap, 0))
2423 devmask |= (1 << 0);
2424 if (slave_possible && ata_devchk(ap, 1))
2425 devmask |= (1 << 1);
2426
2427 /* select device 0 again */
2428 ap->ops->dev_select(ap, 0);
2429
2430 /* issue bus reset */
2431 DPRINTK("about to softreset, devmask=%x\n", devmask);
2432 err_mask = ata_bus_softreset(ap, devmask);
2433 if (err_mask) {
2434 if (verbose)
2435 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2436 ap->id, err_mask);
2437 else
2438 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2439 err_mask);
2440 return -EIO;
2441 }
2442
2443 /* determine by signature whether we have ATA or ATAPI devices */
2444 classes[0] = ata_dev_try_classify(ap, 0, &err);
2445 if (slave_possible && err != 0x81)
2446 classes[1] = ata_dev_try_classify(ap, 1, &err);
2447
2448 out:
2449 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2450 return 0;
2451 }
2452
2453 /**
2454 * sata_std_hardreset - reset host port via SATA phy reset
2455 * @ap: port to reset
2456 * @verbose: fail verbosely
2457 * @class: resulting class of attached device
2458 *
2459 * SATA phy-reset host port using DET bits of SControl register.
2460 * This function is to be used as standard callback for
2461 * ata_drive_*_reset().
2462 *
2463 * LOCKING:
2464 * Kernel thread context (may sleep)
2465 *
2466 * RETURNS:
2467 * 0 on success, -errno otherwise.
2468 */
2469 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2470 {
2471 u32 scontrol;
2472
2473 DPRINTK("ENTER\n");
2474
2475 if (ata_set_sata_spd_needed(ap)) {
2476 /* SATA spec says nothing about how to reconfigure
2477 * spd. To be on the safe side, turn off phy during
2478 * reconfiguration. This works for at least ICH7 AHCI
2479 * and Sil3124.
2480 */
2481 scontrol = scr_read(ap, SCR_CONTROL);
2482 scontrol = (scontrol & 0x0f0) | 0x302;
2483 scr_write_flush(ap, SCR_CONTROL, scontrol);
2484
2485 ata_set_sata_spd(ap);
2486 }
2487
2488 /* issue phy wake/reset */
2489 scontrol = scr_read(ap, SCR_CONTROL);
2490 scontrol = (scontrol & 0x0f0) | 0x301;
2491 scr_write_flush(ap, SCR_CONTROL, scontrol);
2492
2493 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2494 * 10.4.2 says at least 1 ms.
2495 */
2496 msleep(1);
2497
2498 /* bring phy back */
2499 sata_phy_resume(ap);
2500
2501 /* TODO: phy layer with polling, timeouts, etc. */
2502 if (!sata_dev_present(ap)) {
2503 *class = ATA_DEV_NONE;
2504 DPRINTK("EXIT, link offline\n");
2505 return 0;
2506 }
2507
2508 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2509 if (verbose)
2510 printk(KERN_ERR "ata%u: COMRESET failed "
2511 "(device not ready)\n", ap->id);
2512 else
2513 DPRINTK("EXIT, device not ready\n");
2514 return -EIO;
2515 }
2516
2517 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2518
2519 *class = ata_dev_try_classify(ap, 0, NULL);
2520
2521 DPRINTK("EXIT, class=%u\n", *class);
2522 return 0;
2523 }
2524
2525 /**
2526 * ata_std_postreset - standard postreset callback
2527 * @ap: the target ata_port
2528 * @classes: classes of attached devices
2529 *
2530 * This function is invoked after a successful reset. Note that
2531 * the device might have been reset more than once using
2532 * different reset methods before postreset is invoked.
2533 *
2534 * This function is to be used as standard callback for
2535 * ata_drive_*_reset().
2536 *
2537 * LOCKING:
2538 * Kernel thread context (may sleep)
2539 */
2540 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2541 {
2542 DPRINTK("ENTER\n");
2543
2544 /* set cable type if it isn't already set */
2545 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2546 ap->cbl = ATA_CBL_SATA;
2547
2548 /* print link status */
2549 if (ap->cbl == ATA_CBL_SATA)
2550 sata_print_link_status(ap);
2551
2552 /* re-enable interrupts */
2553 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2554 ata_irq_on(ap);
2555
2556 /* is double-select really necessary? */
2557 if (classes[0] != ATA_DEV_NONE)
2558 ap->ops->dev_select(ap, 1);
2559 if (classes[1] != ATA_DEV_NONE)
2560 ap->ops->dev_select(ap, 0);
2561
2562 /* bail out if no device is present */
2563 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2564 DPRINTK("EXIT, no device\n");
2565 return;
2566 }
2567
2568 /* set up device control */
2569 if (ap->ioaddr.ctl_addr) {
2570 if (ap->flags & ATA_FLAG_MMIO)
2571 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2572 else
2573 outb(ap->ctl, ap->ioaddr.ctl_addr);
2574 }
2575
2576 DPRINTK("EXIT\n");
2577 }
2578
2579 /**
2580 * ata_std_probe_reset - standard probe reset method
2581 * @ap: port on which to perform probe-reset
2582 * @classes: resulting classes of attached devices
2583 *
2584 * The stock off-the-shelf ->probe_reset method.
2585 *
2586 * LOCKING:
2587 * Kernel thread context (may sleep)
2588 *
2589 * RETURNS:
2590 * 0 on success, -errno otherwise.
2591 */
2592 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2593 {
2594 ata_reset_fn_t hardreset;
2595
2596 hardreset = NULL;
2597 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2598 hardreset = sata_std_hardreset;
2599
2600 return ata_drive_probe_reset(ap, ata_std_probeinit,
2601 ata_std_softreset, hardreset,
2602 ata_std_postreset, classes);
2603 }
2604
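/**
 * ata_do_reset - run one reset method and classify attached devices
 * @ap: port to reset
 * @reset: reset method to invoke
 * @postreset: postreset method (can be NULL)
 * @verbose: passed through to @reset for error reporting
 * @classes: resulting classes of attached devices
 *
 * Initialise @classes to ATA_DEV_UNKNOWN, invoke @reset, and if any
 * classification was performed convert the remaining ATA_DEV_UNKNOWN
 * entries to ATA_DEV_NONE before calling @postreset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, error code from @reset otherwise.
 */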
2605 int ata_do_reset(struct ata_port *ap,
2606 ata_reset_fn_t reset, ata_postreset_fn_t postreset,
2607 int verbose, unsigned int *classes)
2608 {
2609 int i, rc;
2610
2611 for (i = 0; i < ATA_MAX_DEVICES; i++)
2612 classes[i] = ATA_DEV_UNKNOWN;
2613
2614 rc = reset(ap, verbose, classes);
2615 if (rc)
2616 return rc;
2617
2618 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2619 * is complete and convert all ATA_DEV_UNKNOWN to
2620 * ATA_DEV_NONE.
2621 */
2622 for (i = 0; i < ATA_MAX_DEVICES; i++)
2623 if (classes[i] != ATA_DEV_UNKNOWN)
2624 break;
2625
2626 if (i < ATA_MAX_DEVICES)
2627 for (i = 0; i < ATA_MAX_DEVICES; i++)
2628 if (classes[i] == ATA_DEV_UNKNOWN)
2629 classes[i] = ATA_DEV_NONE;
2630
2631 if (postreset)
2632 postreset(ap, classes);
2633
2634 return 0;
2635 }
2636
2637 /**
2638 * ata_drive_probe_reset - Perform probe reset with given methods
2639 * @ap: port to reset
2640 * @probeinit: probeinit method (can be NULL)
2641 * @softreset: softreset method (can be NULL)
2642 * @hardreset: hardreset method (can be NULL)
2643 * @postreset: postreset method (can be NULL)
2644 * @classes: resulting classes of attached devices
2645 *
2646 * Reset the specified port and classify attached devices using
2647 * given methods. This function prefers softreset but tries all
2648 * possible reset sequences to reset and classify devices. This
2649 * function is intended to be used for constructing ->probe_reset
2650 * callback by low level drivers.
2651 *
2652 * Reset methods should follow these rules.
2653 *
2654 * - Return 0 on success, -errno on failure.
2655 * - If classification is supported, fill classes[] with
2656 * recognized class codes.
2657 * - If classification is not supported, leave classes[] alone.
2658 * - If verbose is non-zero, print error message on failure;
2659 * otherwise, shut up.
2660 *
2661 * LOCKING:
2662 * Kernel thread context (may sleep)
2663 *
2664 * RETURNS:
2665 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2666 * if classification fails, and any error code from reset
2667 * methods.
2668 */
2669 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2670 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2671 ata_postreset_fn_t postreset, unsigned int *classes)
2672 {
2673 int rc = -EINVAL;
2674
2675 if (probeinit)
2676 probeinit(ap);
2677
2678 if (softreset && !ata_set_sata_spd_needed(ap)) {
2679 rc = ata_do_reset(ap, softreset, postreset, 0, classes);
2680 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
2681 goto done;
2682 printk(KERN_INFO "ata%u: softreset failed, will try "
2683 "hardreset in 5 secs\n", ap->id);
2684 ssleep(5);
2685 }
2686
2687 if (!hardreset)
2688 goto done;
2689
2690 while (1) {
2691 rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
2692 if (rc == 0) {
2693 if (classes[0] != ATA_DEV_UNKNOWN)
2694 goto done;
2695 break;
2696 }
2697
2698 if (ata_down_sata_spd_limit(ap))
2699 goto done;
2700
2701 printk(KERN_INFO "ata%u: hardreset failed, will retry "
2702 "in 5 secs\n", ap->id);
2703 ssleep(5);
2704 }
2705
2706 if (softreset) {
2707 printk(KERN_INFO "ata%u: hardreset succeeded without "
2708 "classification, will retry softreset in 5 secs\n",
2709 ap->id);
2710 ssleep(5);
2711
2712 rc = ata_do_reset(ap, softreset, postreset, 0, classes);
2713 }
2714
2715 done:
2716 if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN)
2717 rc = -ENODEV;
2718 return rc;
2719 }
2720
2721 /**
2722 * ata_dev_same_device - Determine whether new ID matches configured device
2723 * @ap: port on which the device to compare against resides
2724 * @dev: device to compare against
2725 * @new_class: class of the new device
2726 * @new_id: IDENTIFY page of the new device
2727 *
2728 * Compare @new_class and @new_id against @dev and determine
2729 * whether @dev is the device indicated by @new_class and
2730 * @new_id.
2731 *
2732 * LOCKING:
2733 * None.
2734 *
2735 * RETURNS:
2736 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2737 */
2738 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2739 unsigned int new_class, const u16 *new_id)
2740 {
2741 const u16 *old_id = dev->id;
2742 unsigned char model[2][41], serial[2][21];
2743 u64 new_n_sectors;
2744
2745 if (dev->class != new_class) {
2746 printk(KERN_INFO
2747 "ata%u: dev %u class mismatch %d != %d\n",
2748 ap->id, dev->devno, dev->class, new_class);
2749 return 0;
2750 }
2751
2752 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2753 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2754 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2755 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2756 new_n_sectors = ata_id_n_sectors(new_id);
2757
2758 if (strcmp(model[0], model[1])) {
2759 printk(KERN_INFO
2760 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2761 ap->id, dev->devno, model[0], model[1]);
2762 return 0;
2763 }
2764
2765 if (strcmp(serial[0], serial[1])) {
2766 printk(KERN_INFO
2767 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2768 ap->id, dev->devno, serial[0], serial[1]);
2769 return 0;
2770 }
2771
2772 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2773 printk(KERN_INFO
2774 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2775 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2776 (unsigned long long)new_n_sectors);
2777 return 0;
2778 }
2779
2780 return 1;
2781 }
2782
2783 /**
2784 * ata_dev_revalidate - Revalidate ATA device
2785 * @ap: port on which the device to revalidate resides
2786 * @dev: device to revalidate
2787 * @post_reset: is this revalidation after reset?
2788 *
2789 * Re-read IDENTIFY page and make sure @dev is still attached to
2790 * the port.
2791 *
2792 * LOCKING:
2793 * Kernel thread context (may sleep)
2794 *
2795 * RETURNS:
2796 * 0 on success, negative errno otherwise
2797 */
2798 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2799 int post_reset)
2800 {
2801 unsigned int class = dev->class;
2802 u16 *id = NULL;
2803 int rc;
2804
2805 if (!ata_dev_enabled(dev)) {
2806 rc = -ENODEV;
2807 goto fail;
2808 }
2809
2810 /* allocate & read ID data */
2811 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2812 if (rc)
2813 goto fail;
2814
2815 /* is the device still there? */
2816 if (!ata_dev_same_device(ap, dev, class, id)) {
2817 rc = -ENODEV;
2818 goto fail;
2819 }
2820
2821 kfree(dev->id);
2822 dev->id = id;
2823
2824 /* configure device according to the new ID */
2825 rc = ata_dev_configure(ap, dev, 0);
2826 if (rc == 0)
2827 return 0;
2828
2829 fail:
2830 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2831 ap->id, dev->devno, rc);
2832 kfree(id);
2833 return rc;
2834 }
2835
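/* DMA blacklist: pairs of model number and firmware revision.  A NULL
 * revision entry blacklists every revision of that model.
 */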
2836 static const char * const ata_dma_blacklist [] = {
2837 "WDC AC11000H", NULL,
2838 "WDC AC22100H", NULL,
2839 "WDC AC32500H", NULL,
2840 "WDC AC33100H", NULL,
2841 "WDC AC31600H", NULL,
2842 "WDC AC32100H", "24.09P07",
2843 "WDC AC23200L", "21.10N21",
2844 "Compaq CRD-8241B", NULL,
2845 "CRD-8400B", NULL,
2846 "CRD-8480B", NULL,
2847 "CRD-8482B", NULL,
2848 "CRD-84", NULL,
2849 "SanDisk SDP3B", NULL,
2850 "SanDisk SDP3B-64", NULL,
2851 "SANYO CD-ROM CRD", NULL,
2852 "HITACHI CDR-8", NULL,
2853 "HITACHI CDR-8335", NULL,
2854 "HITACHI CDR-8435", NULL,
2855 "Toshiba CD-ROM XM-6202B", NULL,
2856 "TOSHIBA CD-ROM XM-1702BC", NULL,
2857 "CD-532E-A", NULL,
2858 "E-IDE CD-ROM CR-840", NULL,
2859 "CD-ROM Drive/F5A", NULL,
2860 "WPI CDD-820", NULL,
2861 "SAMSUNG CD-ROM SC-148C", NULL,
2862 "SAMSUNG CD-ROM SC", NULL,
2863 "SanDisk SDP3B-64", NULL,
2864 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2865 "_NEC DV5800A", NULL,
2866 "SAMSUNG CD-ROM SN-124", "N001"
2867 };
2868
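/* Strip trailing blanks from an ID string in place and return its new length */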
2869 static int ata_strim(char *s, size_t len)
2870 {
2871 len = strnlen(s, len);
2872
2873 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2874 while ((len > 0) && (s[len - 1] == ' ')) {
2875 len--;
2876 s[len] = 0;
2877 }
2878 return len;
2879 }
2880
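/**
 * ata_dma_blacklisted - check whether a device must not use DMA
 * @dev: device to check
 *
 * Compare the model number (and, where the table gives one, the
 * firmware revision) from the device's IDENTIFY data against
 * ata_dma_blacklist[].
 *
 * RETURNS:
 * 1 if the device is blacklisted for DMA, 0 otherwise.
 */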
2881 static int ata_dma_blacklisted(const struct ata_device *dev)
2882 {
2883 unsigned char model_num[40];
2884 unsigned char model_rev[16];
2885 unsigned int nlen, rlen;
2886 int i;
2887
2888 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2889 sizeof(model_num));
2890 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2891 sizeof(model_rev));
2892 nlen = ata_strim(model_num, sizeof(model_num));
2893 rlen = ata_strim(model_rev, sizeof(model_rev));
2894
2895 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2896 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2897 if (ata_dma_blacklist[i+1] == NULL)
2898 return 1;
2899 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2900 return 1;
2901 }
2902 }
2903 return 0;
2904 }
2905
2906 /**
2907 * ata_dev_xfermask - Compute supported xfermask of the given device
2908 * @ap: Port on which the device to compute xfermask for resides
2909 * @dev: Device to compute xfermask for
2910 *
2911 * Compute supported xfermask of @dev and store it in
2912 * dev->*_mask. This function is responsible for applying all
2913 * known limits including host controller limits, device
2914 * blacklist, etc...
2915 *
2916 * FIXME: The current implementation limits all transfer modes to
2917 * those supported by the slowest device on the port. This is not
2918 * required on most controllers.
2919 *
2920 * LOCKING:
2921 * None.
2922 */
2923 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2924 {
2925 struct ata_host_set *hs = ap->host_set;
2926 unsigned long xfer_mask;
2927 int i;
2928
2929 xfer_mask = ata_pack_xfermask(ap->pio_mask,
2930 ap->mwdma_mask, ap->udma_mask);
2931
2932 /* Apply cable rule here. Don't apply it early because when
2933 * we handle hot plug the cable type can itself change.
2934 */
2935 if (ap->cbl == ATA_CBL_PATA40)
2936 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2937
2938 /* FIXME: Use port-wide xfermask for now */
2939 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2940 struct ata_device *d = &ap->device[i];
2941
2942 if (ata_dev_absent(d))
2943 continue;
2944
2945 if (ata_dev_disabled(d)) {
2946 /* to avoid violating device selection timing */
2947 xfer_mask &= ata_pack_xfermask(d->pio_mask,
2948 UINT_MAX, UINT_MAX);
2949 continue;
2950 }
2951
2952 xfer_mask &= ata_pack_xfermask(d->pio_mask,
2953 d->mwdma_mask, d->udma_mask);
2954 xfer_mask &= ata_id_xfermask(d->id);
2955 if (ata_dma_blacklisted(d))
2956 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2957 }
2958
2959 if (ata_dma_blacklisted(dev))
2960 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2961 "disabling DMA\n", ap->id, dev->devno);
2962
2963 if (hs->flags & ATA_HOST_SIMPLEX) {
2964 if (hs->simplex_claimed)
2965 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2966 }
2967
2968 if (ap->ops->mode_filter)
2969 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2970
2971 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
2972 &dev->mwdma_mask, &dev->udma_mask);
2973 }
2974
2975 /**
2976 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2977 * @ap: Port associated with device @dev
2978 * @dev: Device to which command will be sent
2979 *
2980 * Issue SET FEATURES - XFER MODE command to device @dev
2981 * on port @ap.
2982 *
2983 * LOCKING:
2984 * PCI/etc. bus probe sem.
2985 *
2986 * RETURNS:
2987 * 0 on success, AC_ERR_* mask otherwise.
2988 */
2989
2990 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2991 struct ata_device *dev)
2992 {
2993 struct ata_taskfile tf;
2994 unsigned int err_mask;
2995
2996 /* set up set-features taskfile */
2997 DPRINTK("set features - xfer mode\n");
2998
2999 ata_tf_init(ap, &tf, dev->devno);
3000 tf.command = ATA_CMD_SET_FEATURES;
3001 tf.feature = SETFEATURES_XFER;
3002 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3003 tf.protocol = ATA_PROT_NODATA;
3004 tf.nsect = dev->xfer_mode;
3005
3006 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
3007
3008 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3009 return err_mask;
3010 }
3011
3012 /**
3013 * ata_dev_init_params - Issue INIT DEV PARAMS command
3014 * @ap: Port associated with device @dev
3015 * @dev: Device to which command will be sent
3016 *
3017 * LOCKING:
3018 * Kernel thread context (may sleep)
3019 *
3020 * RETURNS:
3021 * 0 on success, AC_ERR_* mask otherwise.
3022 */
3023
3024 static unsigned int ata_dev_init_params(struct ata_port *ap,
3025 struct ata_device *dev,
3026 u16 heads,
3027 u16 sectors)
3028 {
3029 struct ata_taskfile tf;
3030 unsigned int err_mask;
3031
3032 /* Number of sectors per track 1-255. Number of heads 1-16 */
3033 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3034 return AC_ERR_INVALID;
3035
3036 /* set up init dev params taskfile */
3037 DPRINTK("init dev params \n");
3038
3039 ata_tf_init(ap, &tf, dev->devno);
3040 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3041 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3042 tf.protocol = ATA_PROT_NODATA;
3043 tf.nsect = sectors;
3044 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3045
3046 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
3047
3048 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3049 return err_mask;
3050 }
3051
3052 /**
3053 * ata_sg_clean - Unmap DMA memory associated with command
3054 * @qc: Command containing DMA memory to be released
3055 *
3056 * Unmap all mapped DMA memory associated with this command.
3057 *
3058 * LOCKING:
3059 * spin_lock_irqsave(host_set lock)
3060 */
3061
3062 static void ata_sg_clean(struct ata_queued_cmd *qc)
3063 {
3064 struct ata_port *ap = qc->ap;
3065 struct scatterlist *sg = qc->__sg;
3066 int dir = qc->dma_dir;
3067 void *pad_buf = NULL;
3068
3069 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3070 WARN_ON(sg == NULL);
3071
3072 if (qc->flags & ATA_QCFLAG_SINGLE)
3073 WARN_ON(qc->n_elem > 1);
3074
3075 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3076
3077 /* if we padded the buffer out to 32-bit bound, and data
3078 * xfer direction is from-device, we must copy from the
3079 * pad buffer back into the supplied buffer
3080 */
3081 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3082 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3083
3084 if (qc->flags & ATA_QCFLAG_SG) {
3085 if (qc->n_elem)
3086 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3087 /* restore last sg */
3088 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3089 if (pad_buf) {
3090 struct scatterlist *psg = &qc->pad_sgent;
3091 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3092 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3093 kunmap_atomic(addr, KM_IRQ0);
3094 }
3095 } else {
3096 if (qc->n_elem)
3097 dma_unmap_single(ap->dev,
3098 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3099 dir);
3100 /* restore sg */
3101 sg->length += qc->pad_len;
3102 if (pad_buf)
3103 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3104 pad_buf, qc->pad_len);
3105 }
3106
3107 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3108 qc->__sg = NULL;
3109 }
3110
3111 /**
3112 * ata_fill_sg - Fill PCI IDE PRD table
3113 * @qc: Metadata associated with taskfile to be transferred
3114 *
3115 * Fill PCI IDE PRD (scatter-gather) table with segments
3116 * associated with the current disk command.
3117 *
3118 * LOCKING:
3119 * spin_lock_irqsave(host_set lock)
3120 *
3121 */
3122 static void ata_fill_sg(struct ata_queued_cmd *qc)
3123 {
3124 struct ata_port *ap = qc->ap;
3125 struct scatterlist *sg;
3126 unsigned int idx;
3127
3128 WARN_ON(qc->__sg == NULL);
3129 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3130
3131 idx = 0;
3132 ata_for_each_sg(sg, qc) {
3133 u32 addr, offset;
3134 u32 sg_len, len;
3135
3136 /* determine if physical DMA addr spans 64K boundary.
3137 * Note h/w doesn't support 64-bit, so we unconditionally
3138 * truncate dma_addr_t to u32.
3139 */
3140 addr = (u32) sg_dma_address(sg);
3141 sg_len = sg_dma_len(sg);
3142
3143 while (sg_len) {
3144 offset = addr & 0xffff;
3145 len = sg_len;
3146 if ((offset + sg_len) > 0x10000)
3147 len = 0x10000 - offset;
3148
3149 ap->prd[idx].addr = cpu_to_le32(addr);
3150 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3151 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3152
3153 idx++;
3154 sg_len -= len;
3155 addr += len;
3156 }
3157 }
3158
3159 if (idx)
3160 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3161 }
3162 /**
3163 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3164 * @qc: Metadata associated with taskfile to check
3165 *
3166 * Allow low-level driver to filter ATA PACKET commands, returning
3167 * a status indicating whether or not it is OK to use DMA for the
3168 * supplied PACKET command.
3169 *
3170 * LOCKING:
3171 * spin_lock_irqsave(host_set lock)
3172 *
3173 * RETURNS: 0 when ATAPI DMA can be used
3174 * nonzero otherwise
3175 */
3176 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3177 {
3178 struct ata_port *ap = qc->ap;
3179 int rc = 0; /* Assume ATAPI DMA is OK by default */
3180
3181 if (ap->ops->check_atapi_dma)
3182 rc = ap->ops->check_atapi_dma(qc);
3183
3184 /* We don't support polling DMA.
3185 * Use PIO if the LLDD handles only interrupts in
3186 * the HSM_ST_LAST state and the ATAPI device
3187 * generates CDB interrupts.
3188 */
3189 if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
3190 (qc->dev->flags & ATA_DFLAG_CDB_INTR))
3191 rc = 1;
3192
3193 return rc;
3194 }
3195 /**
3196 * ata_qc_prep - Prepare taskfile for submission
3197 * @qc: Metadata associated with taskfile to be prepared
3198 *
3199 * Prepare ATA taskfile for submission.
3200 *
3201 * LOCKING:
3202 * spin_lock_irqsave(host_set lock)
3203 */
3204 void ata_qc_prep(struct ata_queued_cmd *qc)
3205 {
3206 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3207 return;
3208
3209 ata_fill_sg(qc);
3210 }
3211
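/* No-op ->qc_prep for controllers that need no PRD table setup */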
3212 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3213
3214 /**
3215 * ata_sg_init_one - Associate command with memory buffer
3216 * @qc: Command to be associated
3217 * @buf: Memory buffer
3218 * @buflen: Length of memory buffer, in bytes.
3219 *
3220 * Initialize the data-related elements of queued_cmd @qc
3221 * to point to a single memory buffer, @buf of byte length @buflen.
3222 *
3223 * LOCKING:
3224 * spin_lock_irqsave(host_set lock)
3225 */
3226
3227 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3228 {
3229 struct scatterlist *sg;
3230
3231 qc->flags |= ATA_QCFLAG_SINGLE;
3232
3233 memset(&qc->sgent, 0, sizeof(qc->sgent));
3234 qc->__sg = &qc->sgent;
3235 qc->n_elem = 1;
3236 qc->orig_n_elem = 1;
3237 qc->buf_virt = buf;
3238
3239 sg = qc->__sg;
3240 sg_init_one(sg, buf, buflen);
3241 }
3242
3243 /**
3244 * ata_sg_init - Associate command with scatter-gather table.
3245 * @qc: Command to be associated
3246 * @sg: Scatter-gather table.
3247 * @n_elem: Number of elements in s/g table.
3248 *
3249 * Initialize the data-related elements of queued_cmd @qc
3250 * to point to a scatter-gather table @sg, containing @n_elem
3251 * elements.
3252 *
3253 * LOCKING:
3254 * spin_lock_irqsave(host_set lock)
3255 */
3256
3257 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3258 unsigned int n_elem)
3259 {
3260 qc->flags |= ATA_QCFLAG_SG;
3261 qc->__sg = sg;
3262 qc->n_elem = n_elem;
3263 qc->orig_n_elem = n_elem;
3264 }
3265
3266 /**
3267 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3268 * @qc: Command with memory buffer to be mapped.
3269 *
3270 * DMA-map the memory buffer associated with queued_cmd @qc.
3271 *
3272 * LOCKING:
3273 * spin_lock_irqsave(host_set lock)
3274 *
3275 * RETURNS:
3276 * Zero on success, negative on error.
3277 */
3278
3279 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3280 {
3281 struct ata_port *ap = qc->ap;
3282 int dir = qc->dma_dir;
3283 struct scatterlist *sg = qc->__sg;
3284 dma_addr_t dma_address;
3285 int trim_sg = 0;
3286
3287 /* we must lengthen transfers to end on a 32-bit boundary */
3288 qc->pad_len = sg->length & 3;
3289 if (qc->pad_len) {
3290 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3291 struct scatterlist *psg = &qc->pad_sgent;
3292
3293 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3294
3295 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3296
3297 if (qc->tf.flags & ATA_TFLAG_WRITE)
3298 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3299 qc->pad_len);
3300
3301 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3302 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3303 /* trim sg */
3304 sg->length -= qc->pad_len;
3305 if (sg->length == 0)
3306 trim_sg = 1;
3307
3308 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3309 sg->length, qc->pad_len);
3310 }
3311
3312 if (trim_sg) {
3313 qc->n_elem--;
3314 goto skip_map;
3315 }
3316
3317 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3318 sg->length, dir);
3319 if (dma_mapping_error(dma_address)) {
3320 /* restore sg */
3321 sg->length += qc->pad_len;
3322 return -1;
3323 }
3324
3325 sg_dma_address(sg) = dma_address;
3326 sg_dma_len(sg) = sg->length;
3327
3328 skip_map:
3329 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3330 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3331
3332 return 0;
3333 }
3334
3335 /**
3336 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3337 * @qc: Command with scatter-gather table to be mapped.
3338 *
3339 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3340 *
3341 * LOCKING:
3342 * spin_lock_irqsave(host_set lock)
3343 *
3344 * RETURNS:
3345 * Zero on success, negative on error.
3346 *
3347 */
3348
3349 static int ata_sg_setup(struct ata_queued_cmd *qc)
3350 {
3351 struct ata_port *ap = qc->ap;
3352 struct scatterlist *sg = qc->__sg;
3353 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3354 int n_elem, pre_n_elem, dir, trim_sg = 0;
3355
3356 VPRINTK("ENTER, ata%u\n", ap->id);
3357 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3358
3359 /* we must lengthen transfers to end on a 32-bit boundary */
3360 qc->pad_len = lsg->length & 3;
3361 if (qc->pad_len) {
3362 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3363 struct scatterlist *psg = &qc->pad_sgent;
3364 unsigned int offset;
3365
3366 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3367
3368 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3369
3370 /*
3371 * psg->page/offset are used to copy to-be-written
3372 * data in this function or read data in ata_sg_clean.
3373 */
3374 offset = lsg->offset + lsg->length - qc->pad_len;
3375 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3376 psg->offset = offset_in_page(offset);
3377
3378 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3379 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3380 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3381 kunmap_atomic(addr, KM_IRQ0);
3382 }
3383
3384 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3385 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3386 /* trim last sg */
3387 lsg->length -= qc->pad_len;
3388 if (lsg->length == 0)
3389 trim_sg = 1;
3390
3391 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3392 qc->n_elem - 1, lsg->length, qc->pad_len);
3393 }
3394
3395 pre_n_elem = qc->n_elem;
3396 if (trim_sg && pre_n_elem)
3397 pre_n_elem--;
3398
3399 if (!pre_n_elem) {
3400 n_elem = 0;
3401 goto skip_map;
3402 }
3403
3404 dir = qc->dma_dir;
3405 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3406 if (n_elem < 1) {
3407 /* restore last sg */
3408 lsg->length += qc->pad_len;
3409 return -1;
3410 }
3411
3412 DPRINTK("%d sg elements mapped\n", n_elem);
3413
3414 skip_map:
3415 qc->n_elem = n_elem;
3416
3417 return 0;
3418 }
3419
3420 /**
3421 * ata_poll_qc_complete - turn irq back on and finish qc
3422 * @qc: Command to complete
3424 *
3425 * LOCKING:
3426 * None. (grabs host lock)
3427 */
3428
3429 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3430 {
3431 struct ata_port *ap = qc->ap;
3432 unsigned long flags;
3433
3434 spin_lock_irqsave(&ap->host_set->lock, flags);
3435 ata_irq_on(ap);
3436 ata_qc_complete(qc);
3437 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3438 }
3439
3440 /**
3441 * swap_buf_le16 - swap halves of 16-bit words in place
3442 * @buf: Buffer to swap
3443 * @buf_words: Number of 16-bit words in buffer.
3444 *
3445 * Swap halves of 16-bit words if needed to convert from
3446 * little-endian byte order to native cpu byte order, or
3447 * vice-versa.
3448 *
3449 * LOCKING:
3450 * Inherited from caller.
3451 */
3452 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3453 {
3454 #ifdef __BIG_ENDIAN
3455 unsigned int i;
3456
3457 for (i = 0; i < buf_words; i++)
3458 buf[i] = le16_to_cpu(buf[i]);
3459 #endif /* __BIG_ENDIAN */
3460 }
3461
3462 /**
3463 * ata_mmio_data_xfer - Transfer data by MMIO
3464 * @ap: port to read/write
3465 * @buf: data buffer
3466 * @buflen: buffer length
3467 * @write_data: read/write
3468 *
3469 * Transfer data from/to the device data register by MMIO.
3470 *
3471 * LOCKING:
3472 * Inherited from caller.
3473 */
3474
3475 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3476 unsigned int buflen, int write_data)
3477 {
3478 unsigned int i;
3479 unsigned int words = buflen >> 1;
3480 u16 *buf16 = (u16 *) buf;
3481 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3482
3483 /* Transfer multiple of 2 bytes */
3484 if (write_data) {
3485 for (i = 0; i < words; i++)
3486 writew(le16_to_cpu(buf16[i]), mmio);
3487 } else {
3488 for (i = 0; i < words; i++)
3489 buf16[i] = cpu_to_le16(readw(mmio));
3490 }
3491
3492 /* Transfer trailing 1 byte, if any. */
3493 if (unlikely(buflen & 0x01)) {
3494 u16 align_buf[1] = { 0 };
3495 unsigned char *trailing_buf = buf + buflen - 1;
3496
3497 if (write_data) {
3498 memcpy(align_buf, trailing_buf, 1);
3499 writew(le16_to_cpu(align_buf[0]), mmio);
3500 } else {
3501 align_buf[0] = cpu_to_le16(readw(mmio));
3502 memcpy(trailing_buf, align_buf, 1);
3503 }
3504 }
3505 }
3506
3507 /**
3508 * ata_pio_data_xfer - Transfer data by PIO
3509 * @ap: port to read/write
3510 * @buf: data buffer
3511 * @buflen: buffer length
3512 * @write_data: read/write
3513 *
3514 * Transfer data from/to the device data register by PIO.
3515 *
3516 * LOCKING:
3517 * Inherited from caller.
3518 */
3519
3520 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3521 unsigned int buflen, int write_data)
3522 {
3523 unsigned int words = buflen >> 1;
3524
3525 /* Transfer multiple of 2 bytes */
3526 if (write_data)
3527 outsw(ap->ioaddr.data_addr, buf, words);
3528 else
3529 insw(ap->ioaddr.data_addr, buf, words);
3530
3531 /* Transfer trailing 1 byte, if any. */
3532 if (unlikely(buflen & 0x01)) {
3533 u16 align_buf[1] = { 0 };
3534 unsigned char *trailing_buf = buf + buflen - 1;
3535
3536 if (write_data) {
3537 memcpy(align_buf, trailing_buf, 1);
3538 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3539 } else {
3540 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3541 memcpy(trailing_buf, align_buf, 1);
3542 }
3543 }
3544 }
3545
3546 /**
3547 * ata_data_xfer - Transfer data from/to the data register.
3548 * @ap: port to read/write
3549 * @buf: data buffer
3550 * @buflen: buffer length
3551 * @do_write: read/write
3552 *
3553 * Transfer data from/to the device data register.
3554 *
3555 * LOCKING:
3556 * Inherited from caller.
3557 */
3558
3559 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3560 unsigned int buflen, int do_write)
3561 {
3562 /* Make the crap hardware pay the costs not the good stuff */
3563 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3564 unsigned long flags;
3565 local_irq_save(flags);
3566 if (ap->flags & ATA_FLAG_MMIO)
3567 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3568 else
3569 ata_pio_data_xfer(ap, buf, buflen, do_write);
3570 local_irq_restore(flags);
3571 } else {
3572 if (ap->flags & ATA_FLAG_MMIO)
3573 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3574 else
3575 ata_pio_data_xfer(ap, buf, buflen, do_write);
3576 }
3577 }
3578
3579 /**
3580 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3581 * @qc: Command on going
3582 *
3583 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3584 *
3585 * LOCKING:
3586 * Inherited from caller.
3587 */
3588
3589 static void ata_pio_sector(struct ata_queued_cmd *qc)
3590 {
3591 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3592 struct scatterlist *sg = qc->__sg;
3593 struct ata_port *ap = qc->ap;
3594 struct page *page;
3595 unsigned int offset;
3596 unsigned char *buf;
3597
3598 if (qc->cursect == (qc->nsect - 1))
3599 ap->hsm_task_state = HSM_ST_LAST;
3600
3601 page = sg[qc->cursg].page;
3602 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3603
3604 /* get the current page and offset */
3605 page = nth_page(page, (offset >> PAGE_SHIFT));
3606 offset %= PAGE_SIZE;
3607
3608 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3609
3610 if (PageHighMem(page)) {
3611 unsigned long flags;
3612
3613 local_irq_save(flags);
3614 buf = kmap_atomic(page, KM_IRQ0);
3615
3616 /* do the actual data transfer */
3617 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3618
3619 kunmap_atomic(buf, KM_IRQ0);
3620 local_irq_restore(flags);
3621 } else {
3622 buf = page_address(page);
3623 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3624 }
3625
3626 qc->cursect++;
3627 qc->cursg_ofs++;
3628
3629 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3630 qc->cursg++;
3631 qc->cursg_ofs = 0;
3632 }
3633 }
3634
3635 /**
3636 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3637 * @qc: Command on going
3638 *
3639 * Transfer one or many ATA_SECT_SIZE of data from/to the
3640 * ATA device for the DRQ request.
3641 *
3642 * LOCKING:
3643 * Inherited from caller.
3644 */
3645
3646 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3647 {
3648 if (is_multi_taskfile(&qc->tf)) {
3649 /* READ/WRITE MULTIPLE */
3650 unsigned int nsect;
3651
3652 WARN_ON(qc->dev->multi_count == 0);
3653
3654 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3655 while (nsect--)
3656 ata_pio_sector(qc);
3657 } else
3658 ata_pio_sector(qc);
3659 }
3660
3661 /**
3662 * atapi_send_cdb - Write CDB bytes to hardware
3663 * @ap: Port to which ATAPI device is attached.
3664 * @qc: Taskfile currently active
3665 *
3666 * When device has indicated its readiness to accept
3667 * a CDB, this function is called. Send the CDB.
3668 *
3669 * LOCKING:
3670 * caller.
3671 */
3672
3673 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3674 {
3675 /* send SCSI cdb */
3676 DPRINTK("send cdb\n");
3677 WARN_ON(qc->dev->cdb_len < 12);
3678
3679 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3680 ata_altstatus(ap); /* flush */
3681
3682 switch (qc->tf.protocol) {
3683 case ATA_PROT_ATAPI:
3684 ap->hsm_task_state = HSM_ST;
3685 break;
3686 case ATA_PROT_ATAPI_NODATA:
3687 ap->hsm_task_state = HSM_ST_LAST;
3688 break;
3689 case ATA_PROT_ATAPI_DMA:
3690 ap->hsm_task_state = HSM_ST_LAST;
3691 /* initiate bmdma */
3692 ap->ops->bmdma_start(qc);
3693 break;
3694 }
3695 }
3696
3697 /**
3698 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3699 * @qc: Command on going
3700 * @bytes: number of bytes
3701 *
3702 * Transfer data from/to the ATAPI device.
3703 *
3704 * LOCKING:
3705 * Inherited from caller.
3706 *
3707 */
3708
3709 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3710 {
3711 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3712 struct scatterlist *sg = qc->__sg;
3713 struct ata_port *ap = qc->ap;
3714 struct page *page;
3715 unsigned char *buf;
3716 unsigned int offset, count;
3717
3718 if (qc->curbytes + bytes >= qc->nbytes)
3719 ap->hsm_task_state = HSM_ST_LAST;
3720
3721 next_sg:
3722 if (unlikely(qc->cursg >= qc->n_elem)) {
3723 /*
3724 * The end of qc->sg is reached and the device expects
3725 * more data to transfer. In order not to overrun qc->sg
3726 * and fulfill length specified in the byte count register,
3727 * - for read case, discard trailing data from the device
3728 * - for write case, pad with zero data out to the device
3729 */
3730 u16 pad_buf[1] = { 0 };
3731 unsigned int words = bytes >> 1;
3732 unsigned int i;
3733
3734 if (words) /* warning if bytes > 1 */
3735 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3736 ap->id, bytes);
3737
3738 for (i = 0; i < words; i++)
3739 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3740
3741 ap->hsm_task_state = HSM_ST_LAST;
3742 return;
3743 }
3744
3745 sg = &qc->__sg[qc->cursg];
3746
3747 page = sg->page;
3748 offset = sg->offset + qc->cursg_ofs;
3749
3750 /* get the current page and offset */
3751 page = nth_page(page, (offset >> PAGE_SHIFT));
3752 offset %= PAGE_SIZE;
3753
3754 /* don't overrun current sg */
3755 count = min(sg->length - qc->cursg_ofs, bytes);
3756
3757 /* don't cross page boundaries */
3758 count = min(count, (unsigned int)PAGE_SIZE - offset);
3759
3760 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3761
3762 if (PageHighMem(page)) {
3763 unsigned long flags;
3764
3765 local_irq_save(flags);
3766 buf = kmap_atomic(page, KM_IRQ0);
3767
3768 /* do the actual data transfer */
3769 ata_data_xfer(ap, buf + offset, count, do_write);
3770
3771 kunmap_atomic(buf, KM_IRQ0);
3772 local_irq_restore(flags);
3773 } else {
3774 buf = page_address(page);
3775 ata_data_xfer(ap, buf + offset, count, do_write);
3776 }
3777
3778 bytes -= count;
3779 qc->curbytes += count;
3780 qc->cursg_ofs += count;
3781
3782 if (qc->cursg_ofs == sg->length) {
3783 qc->cursg++;
3784 qc->cursg_ofs = 0;
3785 }
3786
3787 if (bytes)
3788 goto next_sg;
3789 }
3790
3791 /**
3792 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3793 * @qc: Command on going
3794 *
3795 * Transfer data from/to the ATAPI device.
3796 *
3797 * LOCKING:
3798 * Inherited from caller.
3799 */
3800
3801 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3802 {
3803 struct ata_port *ap = qc->ap;
3804 struct ata_device *dev = qc->dev;
3805 unsigned int ireason, bc_lo, bc_hi, bytes;
3806 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3807
3808 ap->ops->tf_read(ap, &qc->tf);
3809 ireason = qc->tf.nsect;
3810 bc_lo = qc->tf.lbam;
3811 bc_hi = qc->tf.lbah;
3812 bytes = (bc_hi << 8) | bc_lo;
3813
3814 /* shall be cleared to zero, indicating xfer of data */
3815 if (ireason & (1 << 0))
3816 goto err_out;
3817
3818 /* make sure transfer direction matches expected */
3819 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3820 if (do_write != i_write)
3821 goto err_out;
3822
3823 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3824
3825 __atapi_pio_bytes(qc, bytes);
3826
3827 return;
3828
3829 err_out:
3830 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3831 ap->id, dev->devno);
3832 qc->err_mask |= AC_ERR_HSM;
3833 ap->hsm_task_state = HSM_ST_ERR;
3834 }
3835
3836 /**
3837 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3838 * @ap: the target ata_port
3839 * @qc: qc on going
3840 *
3841 * RETURNS:
3842 * 1 if ok in workqueue, 0 otherwise.
3843 */
3844
3845 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3846 {
3847 if (qc->tf.flags & ATA_TFLAG_POLLING)
3848 return 1;
3849
3850 if (ap->hsm_task_state == HSM_ST_FIRST) {
3851 if (qc->tf.protocol == ATA_PROT_PIO &&
3852 (qc->tf.flags & ATA_TFLAG_WRITE))
3853 return 1;
3854
3855 if (is_atapi_taskfile(&qc->tf) &&
3856 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3857 return 1;
3858 }
3859
3860 return 0;
3861 }
3862
3863 /**
3864 * ata_hsm_move - move the HSM to the next state.
3865 * @ap: the target ata_port
3866 * @qc: qc on going
3867 * @status: current device status
3868 * @in_wq: 1 if called from workqueue, 0 otherwise
3869 *
3870 * RETURNS:
3871 * 1 when poll next status needed, 0 otherwise.
3872 */
3873
3874 static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3875 u8 status, int in_wq)
3876 {
3877 unsigned long flags = 0;
3878 int poll_next;
3879
3880 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3881
3882 /* Make sure ata_qc_issue_prot() does not throw things
3883 * like DMA polling into the workqueue. Notice that
3884 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3885 */
3886 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3887
3888 fsm_start:
3889 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3890 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3891
3892 switch (ap->hsm_task_state) {
3893 case HSM_ST_FIRST:
3894 /* Send first data block or PACKET CDB */
3895
3896 /* If polling, we will stay in the work queue after
3897 * sending the data. Otherwise, interrupt handler
3898 * takes over after sending the data.
3899 */
3900 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3901
3902 /* check device status */
3903 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
3904 /* Wrong status. Let EH handle this */
3905 qc->err_mask |= AC_ERR_HSM;
3906 ap->hsm_task_state = HSM_ST_ERR;
3907 goto fsm_start;
3908 }
3909
3910 /* Device should not ask for data transfer (DRQ=1)
3911 * when it finds something wrong.
3912 * We ignore DRQ here and stop the HSM by
3913 * changing hsm_task_state to HSM_ST_ERR and
3914 * let the EH abort the command or reset the device.
3915 */
3916 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3917 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
3918 ap->id, status);
3919 qc->err_mask |= AC_ERR_DEV;
3920 ap->hsm_task_state = HSM_ST_ERR;
3921 goto fsm_start;
3922 }
3923
3924 /* Send the CDB (atapi) or the first data block (ata pio out).
3925 * During the state transition, interrupt handler shouldn't
3926 * be invoked before the data transfer is complete and
3927 * hsm_task_state is changed. Hence, the following locking.
3928 */
3929 if (in_wq)
3930 spin_lock_irqsave(&ap->host_set->lock, flags);
3931
3932 if (qc->tf.protocol == ATA_PROT_PIO) {
3933 /* PIO data out protocol.
3934 * send first data block.
3935 */
3936
3937 /* ata_pio_sectors() might change the state
3938 * to HSM_ST_LAST. so, the state is changed here
3939 * before ata_pio_sectors().
3940 */
3941 ap->hsm_task_state = HSM_ST;
3942 ata_pio_sectors(qc);
3943 ata_altstatus(ap); /* flush */
3944 } else
3945 /* send CDB */
3946 atapi_send_cdb(ap, qc);
3947
3948 if (in_wq)
3949 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3950
3951 /* if polling, ata_pio_task() handles the rest.
3952 * otherwise, interrupt handler takes over from here.
3953 */
3954 break;
3955
3956 case HSM_ST:
3957 /* complete command or read/write the data register */
3958 if (qc->tf.protocol == ATA_PROT_ATAPI) {
3959 /* ATAPI PIO protocol */
3960 if ((status & ATA_DRQ) == 0) {
3961 /* no more data to transfer */
3962 ap->hsm_task_state = HSM_ST_LAST;
3963 goto fsm_start;
3964 }
3965
3966 /* Device should not ask for data transfer (DRQ=1)
3967 * when it finds something wrong.
3968 * We ignore DRQ here and stop the HSM by
3969 * changing hsm_task_state to HSM_ST_ERR and
3970 * let the EH abort the command or reset the device.
3971 */
3972 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3973 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
3974 ap->id, status);
3975 qc->err_mask |= AC_ERR_DEV;
3976 ap->hsm_task_state = HSM_ST_ERR;
3977 goto fsm_start;
3978 }
3979
3980 atapi_pio_bytes(qc);
3981
3982 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
3983 /* bad ireason reported by device */
3984 goto fsm_start;
3985
3986 } else {
3987 /* ATA PIO protocol */
3988 if (unlikely((status & ATA_DRQ) == 0)) {
3989 /* handle BSY=0, DRQ=0 as error */
3990 qc->err_mask |= AC_ERR_HSM;
3991 ap->hsm_task_state = HSM_ST_ERR;
3992 goto fsm_start;
3993 }
3994
3995 /* For PIO reads, some devices may ask for
3996 * data transfer (DRQ=1) along with ERR=1.
3997 * We respect DRQ here and transfer one
3998 * block of junk data before changing the
3999 * hsm_task_state to HSM_ST_ERR.
4000 *
4001 * For PIO writes, ERR=1 DRQ=1 doesn't make
4002 * sense since the data block has been
4003 * transferred to the device.
4004 */
4005 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4006 /* data might be corrupted */
4007 qc->err_mask |= AC_ERR_DEV;
4008
4009 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4010 ata_pio_sectors(qc);
4011 ata_altstatus(ap);
4012 status = ata_wait_idle(ap);
4013 }
4014
4015 /* ata_pio_sectors() might change the
4016 * state to HSM_ST_LAST. so, the state
4017 * is changed after ata_pio_sectors().
4018 */
4019 ap->hsm_task_state = HSM_ST_ERR;
4020 goto fsm_start;
4021 }
4022
4023 ata_pio_sectors(qc);
4024
4025 if (ap->hsm_task_state == HSM_ST_LAST &&
4026 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4027 /* all data read */
4028 ata_altstatus(ap);
4029 status = ata_wait_idle(ap);
4030 goto fsm_start;
4031 }
4032 }
4033
4034 ata_altstatus(ap); /* flush */
4035 poll_next = 1;
4036 break;
4037
4038 case HSM_ST_LAST:
4039 if (unlikely(!ata_ok(status))) {
4040 qc->err_mask |= __ac_err_mask(status);
4041 ap->hsm_task_state = HSM_ST_ERR;
4042 goto fsm_start;
4043 }
4044
4045 /* no more data to transfer */
4046 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4047 ap->id, qc->dev->devno, status);
4048
4049 WARN_ON(qc->err_mask);
4050
4051 ap->hsm_task_state = HSM_ST_IDLE;
4052
4053 /* complete taskfile transaction */
4054 if (in_wq)
4055 ata_poll_qc_complete(qc);
4056 else
4057 ata_qc_complete(qc);
4058
4059 poll_next = 0;
4060 break;
4061
4062 case HSM_ST_ERR:
4063 if (qc->tf.command != ATA_CMD_PACKET)
4064 printk(KERN_ERR "ata%u: dev %u command error, drv_stat 0x%x\n",
4065 ap->id, qc->dev->devno, status);
4066
4067 /* make sure qc->err_mask is set, so EH knows
4068 * what went wrong and can recover
4069 */
4070 WARN_ON(qc->err_mask == 0);
4071
4072 ap->hsm_task_state = HSM_ST_IDLE;
4073
4074 /* complete taskfile transaction */
4075 if (in_wq)
4076 ata_poll_qc_complete(qc);
4077 else
4078 ata_qc_complete(qc);
4079
4080 poll_next = 0;
4081 break;
4082 default:
4083 poll_next = 0;
4084 BUG();
4085 }
4086
4087 return poll_next;
4088 }
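/*
 * Summary of the state machine driven above: HSM_ST_FIRST (send the
 * first data block or the ATAPI CDB) -> HSM_ST (transfer data blocks)
 * -> HSM_ST_LAST (check final status and complete the qc) ->
 * HSM_ST_IDLE.  Any failure routes through HSM_ST_ERR, which also
 * completes the qc, with a non-zero err_mask.  The return value tells
 * a polling caller (ata_pio_task() below) whether it should keep
 * driving the HSM by calling ata_hsm_move() again.
 */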
4089
4090 static void ata_pio_task(void *_data)
4091 {
4092 struct ata_port *ap = _data;
4093 struct ata_queued_cmd *qc;
4094 u8 status;
4095 int poll_next;
4096
4097 fsm_start:
4098 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4099
4100 qc = ata_qc_from_tag(ap, ap->active_tag);
4101 WARN_ON(qc == NULL);
4102
4103 /*
4104 * This is purely heuristic. This is a fast path.
4105 * Sometimes when we enter, BSY will be cleared in
4106 * a chk-status or two. If not, the drive is probably seeking
4107 * or something. Snooze for a couple msecs, then
4108 * chk-status again. If still busy, queue delayed work.
4109 */
4110 status = ata_busy_wait(ap, ATA_BUSY, 5);
4111 if (status & ATA_BUSY) {
4112 msleep(2);
4113 status = ata_busy_wait(ap, ATA_BUSY, 10);
4114 if (status & ATA_BUSY) {
4115 ata_port_queue_task(ap, ata_pio_task, ap, ATA_SHORT_PAUSE);
4116 return;
4117 }
4118 }
4119
4120 /* move the HSM */
4121 poll_next = ata_hsm_move(ap, qc, status, 1);
4122
4123 /* another command or interrupt handler
4124 * may be running at this point.
4125 */
4126 if (poll_next)
4127 goto fsm_start;
4128 }
4129
4130 /**
4131 * ata_qc_new - Request an available ATA command, for queueing
4132 * @ap: Port from which an available command structure
4133 * is requested
4134 *
4135 * LOCKING:
4136 * None.
4137 */
4138
4139 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4140 {
4141 struct ata_queued_cmd *qc = NULL;
4142 unsigned int i;
4143
4144 for (i = 0; i < ATA_MAX_QUEUE; i++)
4145 if (!test_and_set_bit(i, &ap->qactive)) {
4146 qc = ata_qc_from_tag(ap, i);
4147 break;
4148 }
4149
4150 if (qc)
4151 qc->tag = i;
4152
4153 return qc;
4154 }
4155
4156 /**
4157 * ata_qc_new_init - Request an available ATA command, and initialize it
4158 * @ap: Port associated with device @dev
4159 * @dev: Device from whom we request an available command structure
4160 *
4161 * LOCKING:
4162 * None.
4163 */
4164
4165 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
4166 struct ata_device *dev)
4167 {
4168 struct ata_queued_cmd *qc;
4169
4170 qc = ata_qc_new(ap);
4171 if (qc) {
4172 qc->scsicmd = NULL;
4173 qc->ap = ap;
4174 qc->dev = dev;
4175
4176 ata_qc_reinit(qc);
4177 }
4178
4179 return qc;
4180 }
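/*
 * Illustrative sketch (not part of libata) of the usual qc life cycle
 * in a submission path.  The caller is assumed to hold the host_set
 * lock; example_issue_nodata() and example_done() are hypothetical
 * names.
 *
 *	static void example_issue_nodata(struct ata_port *ap,
 *					 struct ata_device *dev)
 *	{
 *		struct ata_queued_cmd *qc;
 *
 *		qc = ata_qc_new_init(ap, dev);
 *		if (!qc)
 *			return;
 *
 *		qc->tf.command = ATA_CMD_CHK_POWER;
 *		qc->tf.flags |= ATA_TFLAG_DEVICE;
 *		qc->tf.protocol = ATA_PROT_NODATA;
 *		qc->complete_fn = example_done;
 *
 *		ata_qc_issue(qc);
 *	}
 *
 * ata_qc_issue() completes the qc itself (with err_mask set) if DMA
 * mapping or issue fails, so no separate error path is needed here.
 */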
4181
4182 /**
4183 * ata_qc_free - free unused ata_queued_cmd
4184 * @qc: Command to complete
4185 *
4186 * Designed to free unused ata_queued_cmd object
4187 * in case something prevents using it.
4188 *
4189 * LOCKING:
4190 * spin_lock_irqsave(host_set lock)
4191 */
4192 void ata_qc_free(struct ata_queued_cmd *qc)
4193 {
4194 struct ata_port *ap = qc->ap;
4195 unsigned int tag;
4196
4197 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4198
4199 qc->flags = 0;
4200 tag = qc->tag;
4201 if (likely(ata_tag_valid(tag))) {
4202 if (tag == ap->active_tag)
4203 ap->active_tag = ATA_TAG_POISON;
4204 qc->tag = ATA_TAG_POISON;
4205 clear_bit(tag, &ap->qactive);
4206 }
4207 }
4208
4209 void __ata_qc_complete(struct ata_queued_cmd *qc)
4210 {
4211 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4212 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4213
4214 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4215 ata_sg_clean(qc);
4216
4217 /* atapi: mark qc as inactive to prevent the interrupt handler
4218 * from completing the command twice later, before the error handler
4219 * is called. (when rc != 0 and atapi request sense is needed)
4220 */
4221 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4222
4223 /* call completion callback */
4224 qc->complete_fn(qc);
4225 }
4226
4227 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4228 {
4229 struct ata_port *ap = qc->ap;
4230
4231 switch (qc->tf.protocol) {
4232 case ATA_PROT_DMA:
4233 case ATA_PROT_ATAPI_DMA:
4234 return 1;
4235
4236 case ATA_PROT_ATAPI:
4237 case ATA_PROT_PIO:
4238 if (ap->flags & ATA_FLAG_PIO_DMA)
4239 return 1;
4240
4241 /* fall through */
4242
4243 default:
4244 return 0;
4245 }
4246
4247 /* never reached */
4248 }
4249
4250 /**
4251 * ata_qc_issue - issue taskfile to device
4252 * @qc: command to issue to device
4253 *
4254 * Prepare an ATA command for submission to the device.
4255 * This includes mapping the data into a DMA-able
4256 * area, filling in the S/G table, and finally
4257 * writing the taskfile to hardware, starting the command.
4258 *
4259 * LOCKING:
4260 * spin_lock_irqsave(host_set lock)
4261 */
4262 void ata_qc_issue(struct ata_queued_cmd *qc)
4263 {
4264 struct ata_port *ap = qc->ap;
4265
4266 qc->ap->active_tag = qc->tag;
4267 qc->flags |= ATA_QCFLAG_ACTIVE;
4268
4269 if (ata_should_dma_map(qc)) {
4270 if (qc->flags & ATA_QCFLAG_SG) {
4271 if (ata_sg_setup(qc))
4272 goto sg_err;
4273 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4274 if (ata_sg_setup_one(qc))
4275 goto sg_err;
4276 }
4277 } else {
4278 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4279 }
4280
4281 ap->ops->qc_prep(qc);
4282
4283 qc->err_mask |= ap->ops->qc_issue(qc);
4284 if (unlikely(qc->err_mask))
4285 goto err;
4286 return;
4287
4288 sg_err:
4289 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4290 qc->err_mask |= AC_ERR_SYSTEM;
4291 err:
4292 ata_qc_complete(qc);
4293 }
4294
4295 /**
4296 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4297 * @qc: command to issue to device
4298 *
4299 * Using various libata functions and hooks, this function
4300 * starts an ATA command. ATA commands are grouped into
4301 * classes called "protocols", and issuing each type of protocol
4302 * is slightly different.
4303 *
4304 * May be used as the qc_issue() entry in ata_port_operations.
4305 *
4306 * LOCKING:
4307 * spin_lock_irqsave(host_set lock)
4308 *
4309 * RETURNS:
4310 * Zero on success, AC_ERR_* mask on failure
4311 */
4312
4313 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4314 {
4315 struct ata_port *ap = qc->ap;
4316
4317 /* Use polling PIO if the LLD doesn't handle
4318 * interrupt-driven PIO and ATAPI CDB interrupts.
4319 */
4320 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4321 switch (qc->tf.protocol) {
4322 case ATA_PROT_PIO:
4323 case ATA_PROT_ATAPI:
4324 case ATA_PROT_ATAPI_NODATA:
4325 qc->tf.flags |= ATA_TFLAG_POLLING;
4326 break;
4327 case ATA_PROT_ATAPI_DMA:
4328 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4329 /* see ata_check_atapi_dma() */
4330 BUG();
4331 break;
4332 default:
4333 break;
4334 }
4335 }
4336
4337 /* select the device */
4338 ata_dev_select(ap, qc->dev->devno, 1, 0);
4339
4340 /* start the command */
4341 switch (qc->tf.protocol) {
4342 case ATA_PROT_NODATA:
4343 if (qc->tf.flags & ATA_TFLAG_POLLING)
4344 ata_qc_set_polling(qc);
4345
4346 ata_tf_to_host(ap, &qc->tf);
4347 ap->hsm_task_state = HSM_ST_LAST;
4348
4349 if (qc->tf.flags & ATA_TFLAG_POLLING)
4350 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4351
4352 break;
4353
4354 case ATA_PROT_DMA:
4355 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4356
4357 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4358 ap->ops->bmdma_setup(qc); /* set up bmdma */
4359 ap->ops->bmdma_start(qc); /* initiate bmdma */
4360 ap->hsm_task_state = HSM_ST_LAST;
4361 break;
4362
4363 case ATA_PROT_PIO:
4364 if (qc->tf.flags & ATA_TFLAG_POLLING)
4365 ata_qc_set_polling(qc);
4366
4367 ata_tf_to_host(ap, &qc->tf);
4368
4369 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4370 /* PIO data out protocol */
4371 ap->hsm_task_state = HSM_ST_FIRST;
4372 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4373
4374 /* always send first data block using
4375 * the ata_pio_task() codepath.
4376 */
4377 } else {
4378 /* PIO data in protocol */
4379 ap->hsm_task_state = HSM_ST;
4380
4381 if (qc->tf.flags & ATA_TFLAG_POLLING)
4382 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4383
4384 /* if polling, ata_pio_task() handles the rest.
4385 * otherwise, interrupt handler takes over from here.
4386 */
4387 }
4388
4389 break;
4390
4391 case ATA_PROT_ATAPI:
4392 case ATA_PROT_ATAPI_NODATA:
4393 if (qc->tf.flags & ATA_TFLAG_POLLING)
4394 ata_qc_set_polling(qc);
4395
4396 ata_tf_to_host(ap, &qc->tf);
4397
4398 ap->hsm_task_state = HSM_ST_FIRST;
4399
4400 /* send cdb by polling if no cdb interrupt */
4401 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4402 (qc->tf.flags & ATA_TFLAG_POLLING))
4403 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4404 break;
4405
4406 case ATA_PROT_ATAPI_DMA:
4407 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4408
4409 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4410 ap->ops->bmdma_setup(qc); /* set up bmdma */
4411 ap->hsm_task_state = HSM_ST_FIRST;
4412
4413 /* send cdb by polling if no cdb interrupt */
4414 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4415 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4416 break;
4417
4418 default:
4419 WARN_ON(1);
4420 return AC_ERR_SYSTEM;
4421 }
4422
4423 return 0;
4424 }
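/*
 * ata_qc_issue_prot() is normally plugged into a low-level driver's
 * ata_port_operations together with the other helpers exported at the
 * bottom of this file.  A rough sketch; example_port_ops is
 * hypothetical and the list of fields is not exhaustive:
 *
 *	static const struct ata_port_operations example_port_ops = {
 *		.port_disable	= ata_port_disable,
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.check_status	= ata_check_status,
 *		.exec_command	= ata_exec_command,
 *		.dev_select	= ata_std_dev_select,
 *		.probe_reset	= ata_std_probe_reset,
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		.eng_timeout	= ata_eng_timeout,
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		.port_start	= ata_port_start,
 *		.port_stop	= ata_port_stop,
 *		.host_stop	= ata_host_stop,
 *	};
 */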
4425
4426 /**
4427 * ata_host_intr - Handle host interrupt for given (port, task)
4428 * @ap: Port on which interrupt arrived (possibly...)
4429 * @qc: Taskfile currently active in engine
4430 *
4431 * Handle host interrupt for given queued command. DMA completion
4432 * and interrupt-driven PIO/ATAPI CDB interrupts are handled here;
4433 * polled commands are driven by ata_pio_task() with nIEN set.
4434 *
4435 * LOCKING:
4436 * spin_lock_irqsave(host_set lock)
4437 *
4438 * RETURNS:
4439 * One if interrupt was handled, zero if not (shared irq).
4440 */
4441
4442 inline unsigned int ata_host_intr (struct ata_port *ap,
4443 struct ata_queued_cmd *qc)
4444 {
4445 u8 status, host_stat = 0;
4446
4447 VPRINTK("ata%u: protocol %d task_state %d\n",
4448 ap->id, qc->tf.protocol, ap->hsm_task_state);
4449
4450 /* Check whether we are expecting an interrupt in this state */
4451 switch (ap->hsm_task_state) {
4452 case HSM_ST_FIRST:
4453 /* Some pre-ATAPI-4 devices assert INTRQ
4454 * at this state when ready to receive CDB.
4455 */
4456
4457 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4458 * The flag was turned on only for ATAPI devices, so there is
4459 * no need to check is_atapi_taskfile(&qc->tf) again.
4460 */
4461 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4462 goto idle_irq;
4463 break;
4464 case HSM_ST_LAST:
4465 if (qc->tf.protocol == ATA_PROT_DMA ||
4466 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4467 /* check status of DMA engine */
4468 host_stat = ap->ops->bmdma_status(ap);
4469 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4470
4471 /* if it's not our irq... */
4472 if (!(host_stat & ATA_DMA_INTR))
4473 goto idle_irq;
4474
4475 /* before we do anything else, clear DMA-Start bit */
4476 ap->ops->bmdma_stop(qc);
4477
4478 if (unlikely(host_stat & ATA_DMA_ERR)) {
4479 /* error when transferring data to/from memory */
4480 qc->err_mask |= AC_ERR_HOST_BUS;
4481 ap->hsm_task_state = HSM_ST_ERR;
4482 }
4483 }
4484 break;
4485 case HSM_ST:
4486 break;
4487 default:
4488 goto idle_irq;
4489 }
4490
4491 /* check altstatus */
4492 status = ata_altstatus(ap);
4493 if (status & ATA_BUSY)
4494 goto idle_irq;
4495
4496 /* check main status, clearing INTRQ */
4497 status = ata_chk_status(ap);
4498 if (unlikely(status & ATA_BUSY))
4499 goto idle_irq;
4500
4501 /* ack bmdma irq events */
4502 ap->ops->irq_clear(ap);
4503
4504 ata_hsm_move(ap, qc, status, 0);
4505 return 1; /* irq handled */
4506
4507 idle_irq:
4508 ap->stats.idle_irq++;
4509
4510 #ifdef ATA_IRQ_TRAP
4511 if ((ap->stats.idle_irq % 1000) == 0) {
4512 ata_irq_ack(ap, 0); /* debug trap */
4513 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4514 return 1;
4515 }
4516 #endif
4517 return 0; /* irq not handled */
4518 }
4519
4520 /**
4521 * ata_interrupt - Default ATA host interrupt handler
4522 * @irq: irq line (unused)
4523 * @dev_instance: pointer to our ata_host_set information structure
4524 * @regs: unused
4525 *
4526 * Default interrupt handler for PCI IDE devices. Calls
4527 * ata_host_intr() for each port that is not disabled.
4528 *
4529 * LOCKING:
4530 * Obtains host_set lock during operation.
4531 *
4532 * RETURNS:
4533 * IRQ_NONE or IRQ_HANDLED.
4534 */
4535
4536 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4537 {
4538 struct ata_host_set *host_set = dev_instance;
4539 unsigned int i;
4540 unsigned int handled = 0;
4541 unsigned long flags;
4542
4543 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4544 spin_lock_irqsave(&host_set->lock, flags);
4545
4546 for (i = 0; i < host_set->n_ports; i++) {
4547 struct ata_port *ap;
4548
4549 ap = host_set->ports[i];
4550 if (ap &&
4551 !(ap->flags & ATA_FLAG_DISABLED)) {
4552 struct ata_queued_cmd *qc;
4553
4554 qc = ata_qc_from_tag(ap, ap->active_tag);
4555 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4556 (qc->flags & ATA_QCFLAG_ACTIVE))
4557 handled |= ata_host_intr(ap, qc);
4558 }
4559 }
4560
4561 spin_unlock_irqrestore(&host_set->lock, flags);
4562
4563 return IRQ_RETVAL(handled);
4564 }
4565
4566
4567 /*
4568 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4569 * without filling any other registers
4570 */
4571 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4572 u8 cmd)
4573 {
4574 struct ata_taskfile tf;
4575 int err;
4576
4577 ata_tf_init(ap, &tf, dev->devno);
4578
4579 tf.command = cmd;
4580 tf.flags |= ATA_TFLAG_DEVICE;
4581 tf.protocol = ATA_PROT_NODATA;
4582
4583 err = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
4584 if (err)
4585 printk(KERN_ERR "%s: ata command failed: %d\n",
4586 __FUNCTION__, err);
4587
4588 return err;
4589 }
4590
4591 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4592 {
4593 u8 cmd;
4594
4595 if (!ata_try_flush_cache(dev))
4596 return 0;
4597
4598 if (ata_id_has_flush_ext(dev->id))
4599 cmd = ATA_CMD_FLUSH_EXT;
4600 else
4601 cmd = ATA_CMD_FLUSH;
4602
4603 return ata_do_simple_cmd(ap, dev, cmd);
4604 }
4605
4606 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4607 {
4608 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4609 }
4610
4611 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4612 {
4613 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4614 }
4615
4616 /**
4617 * ata_device_resume - wake up a previously suspended device
4618 * @ap: port the device is connected to
4619 * @dev: the device to resume
4620 *
4621 * Kick the drive back into action by sending it an idle immediate
4622 * command and making sure its transfer mode matches between drive
4623 * and host.
4624 *
4625 */
4626 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4627 {
4628 if (ap->flags & ATA_FLAG_SUSPENDED) {
4629 struct ata_device *failed_dev;
4630 ap->flags &= ~ATA_FLAG_SUSPENDED;
4631 while (ata_set_mode(ap, &failed_dev))
4632 ata_dev_disable(ap, failed_dev);
4633 }
4634 if (!ata_dev_enabled(dev))
4635 return 0;
4636 if (dev->class == ATA_DEV_ATA)
4637 ata_start_drive(ap, dev);
4638
4639 return 0;
4640 }
4641
4642 /**
4643 * ata_device_suspend - prepare a device for suspend
4644 * @ap: port the device is connected to
4645 * @dev: the device to suspend
4646 *
4647 * Flush the cache on the drive, if appropriate, then issue a
4648 * standbynow command.
4649 */
4650 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4651 {
4652 if (!ata_dev_enabled(dev))
4653 return 0;
4654 if (dev->class == ATA_DEV_ATA)
4655 ata_flush_cache(ap, dev);
4656
4657 if (state.event != PM_EVENT_FREEZE)
4658 ata_standby_drive(ap, dev);
4659 ap->flags |= ATA_FLAG_SUSPENDED;
4660 return 0;
4661 }
4662
4663 /**
4664 * ata_port_start - Set port up for dma.
4665 * @ap: Port to initialize
4666 *
4667 * Called just after data structures for each port are
4668 * initialized. Allocates space for PRD table.
4669 *
4670 * May be used as the port_start() entry in ata_port_operations.
4671 *
4672 * LOCKING:
4673 * Inherited from caller.
4674 */
4675
4676 int ata_port_start (struct ata_port *ap)
4677 {
4678 struct device *dev = ap->dev;
4679 int rc;
4680
4681 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4682 if (!ap->prd)
4683 return -ENOMEM;
4684
4685 rc = ata_pad_alloc(ap, dev);
4686 if (rc) {
4687 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4688 return rc;
4689 }
4690
4691 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4692
4693 return 0;
4694 }
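/*
 * Drivers that need per-port private state usually wrap this helper:
 * call ata_port_start() first, then allocate their own data and hang
 * it off ap->private_data.  A hedged sketch; struct example_port_priv
 * and example_port_start() are hypothetical:
 *
 *	static int example_port_start(struct ata_port *ap)
 *	{
 *		struct example_port_priv *pp;
 *		int rc;
 *
 *		rc = ata_port_start(ap);
 *		if (rc)
 *			return rc;
 *
 *		pp = kzalloc(sizeof(*pp), GFP_KERNEL);
 *		if (!pp) {
 *			ata_port_stop(ap);
 *			return -ENOMEM;
 *		}
 *
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */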
4695
4696
4697 /**
4698 * ata_port_stop - Undo ata_port_start()
4699 * @ap: Port to shut down
4700 *
4701 * Frees the PRD table.
4702 *
4703 * May be used as the port_stop() entry in ata_port_operations.
4704 *
4705 * LOCKING:
4706 * Inherited from caller.
4707 */
4708
4709 void ata_port_stop (struct ata_port *ap)
4710 {
4711 struct device *dev = ap->dev;
4712
4713 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4714 ata_pad_free(ap, dev);
4715 }
4716
4717 void ata_host_stop (struct ata_host_set *host_set)
4718 {
4719 if (host_set->mmio_base)
4720 iounmap(host_set->mmio_base);
4721 }
4722
4723
4724 /**
4725 * ata_host_remove - Unregister SCSI host structure with upper layers
4726 * @ap: Port to unregister
4727 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4728 *
4729 * LOCKING:
4730 * Inherited from caller.
4731 */
4732
4733 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4734 {
4735 struct Scsi_Host *sh = ap->host;
4736
4737 DPRINTK("ENTER\n");
4738
4739 if (do_unregister)
4740 scsi_remove_host(sh);
4741
4742 ap->ops->port_stop(ap);
4743 }
4744
4745 /**
4746 * ata_host_init - Initialize an ata_port structure
4747 * @ap: Structure to initialize
4748 * @host: associated SCSI mid-layer structure
4749 * @host_set: Collection of hosts to which @ap belongs
4750 * @ent: Probe information provided by low-level driver
4751 * @port_no: Port number associated with this ata_port
4752 *
4753 * Initialize a new ata_port structure, and its associated
4754 * scsi_host.
4755 *
4756 * LOCKING:
4757 * Inherited from caller.
4758 */
4759
4760 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4761 struct ata_host_set *host_set,
4762 const struct ata_probe_ent *ent, unsigned int port_no)
4763 {
4764 unsigned int i;
4765
4766 host->max_id = 16;
4767 host->max_lun = 1;
4768 host->max_channel = 1;
4769 host->unique_id = ata_unique_id++;
4770 host->max_cmd_len = 12;
4771
4772 ap->flags = ATA_FLAG_DISABLED;
4773 ap->id = host->unique_id;
4774 ap->host = host;
4775 ap->ctl = ATA_DEVCTL_OBS;
4776 ap->host_set = host_set;
4777 ap->dev = ent->dev;
4778 ap->port_no = port_no;
4779 ap->hard_port_no =
4780 ent->legacy_mode ? ent->hard_port_no : port_no;
4781 ap->pio_mask = ent->pio_mask;
4782 ap->mwdma_mask = ent->mwdma_mask;
4783 ap->udma_mask = ent->udma_mask;
4784 ap->flags |= ent->host_flags;
4785 ap->ops = ent->port_ops;
4786 ap->cbl = ATA_CBL_NONE;
4787 ap->sata_spd_limit = UINT_MAX;
4788 ap->active_tag = ATA_TAG_POISON;
4789 ap->last_ctl = 0xFF;
4790
4791 INIT_WORK(&ap->port_task, NULL, NULL);
4792 INIT_LIST_HEAD(&ap->eh_done_q);
4793
4794 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4795 struct ata_device *dev = &ap->device[i];
4796 dev->devno = i;
4797 dev->pio_mask = UINT_MAX;
4798 dev->mwdma_mask = UINT_MAX;
4799 dev->udma_mask = UINT_MAX;
4800 }
4801
4802 #ifdef ATA_IRQ_TRAP
4803 ap->stats.unhandled_irq = 1;
4804 ap->stats.idle_irq = 1;
4805 #endif
4806
4807 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4808 }
4809
4810 /**
4811 * ata_host_add - Attach low-level ATA driver to system
4812 * @ent: Information provided by low-level driver
4813 * @host_set: Collections of ports to which we add
4814 * @port_no: Port number associated with this host
4815 *
4816 * Attach low-level ATA driver to system.
4817 *
4818 * LOCKING:
4819 * PCI/etc. bus probe sem.
4820 *
4821 * RETURNS:
4822 * New ata_port on success, NULL on error.
4823 */
4824
4825 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4826 struct ata_host_set *host_set,
4827 unsigned int port_no)
4828 {
4829 struct Scsi_Host *host;
4830 struct ata_port *ap;
4831 int rc;
4832
4833 DPRINTK("ENTER\n");
4834
4835 if (!ent->port_ops->probe_reset &&
4836 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4837 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4838 port_no);
4839 return NULL;
4840 }
4841
4842 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4843 if (!host)
4844 return NULL;
4845
4846 host->transportt = &ata_scsi_transport_template;
4847
4848 ap = (struct ata_port *) &host->hostdata[0];
4849
4850 ata_host_init(ap, host, host_set, ent, port_no);
4851
4852 rc = ap->ops->port_start(ap);
4853 if (rc)
4854 goto err_out;
4855
4856 return ap;
4857
4858 err_out:
4859 scsi_host_put(host);
4860 return NULL;
4861 }
4862
4863 /**
4864 * ata_device_add - Register hardware device with ATA and SCSI layers
4865 * @ent: Probe information describing hardware device to be registered
4866 *
4867 * This function processes the information provided in the probe
4868 * information struct @ent, allocates the necessary ATA and SCSI
4869 * host information structures, initializes them, and registers
4870 * everything with requisite kernel subsystems.
4871 *
4872 * This function requests irqs, probes the ATA bus, and probes
4873 * the SCSI bus.
4874 *
4875 * LOCKING:
4876 * PCI/etc. bus probe sem.
4877 *
4878 * RETURNS:
4879 * Number of ports registered. Zero on error (no ports registered).
4880 */
4881
4882 int ata_device_add(const struct ata_probe_ent *ent)
4883 {
4884 unsigned int count = 0, i;
4885 struct device *dev = ent->dev;
4886 struct ata_host_set *host_set;
4887
4888 DPRINTK("ENTER\n");
4889 /* alloc a container for our list of ATA ports (buses) */
4890 host_set = kzalloc(sizeof(struct ata_host_set) +
4891 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4892 if (!host_set)
4893 return 0;
4894 spin_lock_init(&host_set->lock);
4895
4896 host_set->dev = dev;
4897 host_set->n_ports = ent->n_ports;
4898 host_set->irq = ent->irq;
4899 host_set->mmio_base = ent->mmio_base;
4900 host_set->private_data = ent->private_data;
4901 host_set->ops = ent->port_ops;
4902 host_set->flags = ent->host_set_flags;
4903
4904 /* register each port bound to this device */
4905 for (i = 0; i < ent->n_ports; i++) {
4906 struct ata_port *ap;
4907 unsigned long xfer_mode_mask;
4908
4909 ap = ata_host_add(ent, host_set, i);
4910 if (!ap)
4911 goto err_out;
4912
4913 host_set->ports[i] = ap;
4914 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4915 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4916 (ap->pio_mask << ATA_SHIFT_PIO);
4917
4918 /* print per-port info to dmesg */
4919 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4920 "bmdma 0x%lX irq %lu\n",
4921 ap->id,
4922 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4923 ata_mode_string(xfer_mode_mask),
4924 ap->ioaddr.cmd_addr,
4925 ap->ioaddr.ctl_addr,
4926 ap->ioaddr.bmdma_addr,
4927 ent->irq);
4928
4929 ata_chk_status(ap);
4930 host_set->ops->irq_clear(ap);
4931 count++;
4932 }
4933
4934 if (!count)
4935 goto err_free_ret;
4936
4937 /* obtain irq, which is shared between channels */
4938 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4939 DRV_NAME, host_set))
4940 goto err_out;
4941
4942 /* perform each probe synchronously */
4943 DPRINTK("probe begin\n");
4944 for (i = 0; i < count; i++) {
4945 struct ata_port *ap;
4946 int rc;
4947
4948 ap = host_set->ports[i];
4949
4950 DPRINTK("ata%u: bus probe begin\n", ap->id);
4951 rc = ata_bus_probe(ap);
4952 DPRINTK("ata%u: bus probe end\n", ap->id);
4953
4954 if (rc) {
4955 /* FIXME: do something useful here?
4956 * Current libata behavior will
4957 * tear down everything when
4958 * the module is removed
4959 * or the h/w is unplugged.
4960 */
4961 }
4962
4963 rc = scsi_add_host(ap->host, dev);
4964 if (rc) {
4965 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4966 ap->id);
4967 /* FIXME: do something useful here */
4968 /* FIXME: handle unconditional calls to
4969 * scsi_scan_host and ata_host_remove, below,
4970 * at the very least
4971 */
4972 }
4973 }
4974
4975 /* probes are done, now scan each port's disk(s) */
4976 DPRINTK("host probe begin\n");
4977 for (i = 0; i < count; i++) {
4978 struct ata_port *ap = host_set->ports[i];
4979
4980 ata_scsi_scan_host(ap);
4981 }
4982
4983 dev_set_drvdata(dev, host_set);
4984
4985 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4986 return ent->n_ports; /* success */
4987
4988 err_out:
4989 for (i = 0; i < count; i++) {
4990 ata_host_remove(host_set->ports[i], 1);
4991 scsi_host_put(host_set->ports[i]->host);
4992 }
4993 err_free_ret:
4994 kfree(host_set);
4995 VPRINTK("EXIT, returning 0\n");
4996 return 0;
4997 }
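/*
 * A low-level driver normally fills a struct ata_probe_ent in its probe
 * routine and hands it to ata_device_add(); the PCI helpers
 * (ata_pci_init_one() and friends) do this on the driver's behalf.
 * A rough, hypothetical sketch of the non-PCI path, assuming a local
 * "struct ata_probe_ent ent" and the probing "struct device *dev";
 * every example_* and EXAMPLE_* name is made up:
 *
 *	memset(&ent, 0, sizeof(ent));
 *	INIT_LIST_HEAD(&ent.node);
 *	ent.dev = dev;
 *	ent.sht = &example_sht;
 *	ent.port_ops = &example_port_ops;
 *	ent.n_ports = 1;
 *	ent.pio_mask = 0x1f;
 *	ent.irq = EXAMPLE_IRQ;
 *	ent.irq_flags = SA_SHIRQ;
 *	ent.host_flags = ATA_FLAG_SRST;
 *	ent.port[0].cmd_addr = EXAMPLE_CMD_BASE;
 *	ent.port[0].ctl_addr = EXAMPLE_CTL_BASE;
 *	ent.port[0].altstatus_addr = ent.port[0].ctl_addr;
 *	ata_std_ports(&ent.port[0]);
 *
 *	if (!ata_device_add(&ent))
 *		return -ENODEV;
 *
 * pio_mask 0x1f advertises PIO modes 0-4.  host_flags must include a
 * reset method (or port_ops->probe_reset must be set), otherwise
 * ata_host_add() above rejects the port.
 */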
4998
4999 /**
5000 * ata_host_set_remove - PCI layer callback for device removal
5001 * @host_set: ATA host set that was removed
5002 *
5003 * Unregister all objects associated with this host set. Free those
5004 * objects.
5005 *
5006 * LOCKING:
5007 * Inherited from calling layer (may sleep).
5008 */
5009
5010 void ata_host_set_remove(struct ata_host_set *host_set)
5011 {
5012 struct ata_port *ap;
5013 unsigned int i;
5014
5015 for (i = 0; i < host_set->n_ports; i++) {
5016 ap = host_set->ports[i];
5017 scsi_remove_host(ap->host);
5018 }
5019
5020 free_irq(host_set->irq, host_set);
5021
5022 for (i = 0; i < host_set->n_ports; i++) {
5023 ap = host_set->ports[i];
5024
5025 ata_scsi_release(ap->host);
5026
5027 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5028 struct ata_ioports *ioaddr = &ap->ioaddr;
5029
5030 if (ioaddr->cmd_addr == 0x1f0)
5031 release_region(0x1f0, 8);
5032 else if (ioaddr->cmd_addr == 0x170)
5033 release_region(0x170, 8);
5034 }
5035
5036 scsi_host_put(ap->host);
5037 }
5038
5039 if (host_set->ops->host_stop)
5040 host_set->ops->host_stop(host_set);
5041
5042 kfree(host_set);
5043 }
5044
5045 /**
5046 * ata_scsi_release - SCSI layer callback hook for host unload
5047 * @host: libata host to be unloaded
5048 *
5049 * Performs all duties necessary to shut down a libata port:
5050 * kill the port kthread, disable the port, and release resources.
5051 *
5052 * LOCKING:
5053 * Inherited from SCSI layer.
5054 *
5055 * RETURNS:
5056 * One.
5057 */
5058
5059 int ata_scsi_release(struct Scsi_Host *host)
5060 {
5061 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
5062 int i;
5063
5064 DPRINTK("ENTER\n");
5065
5066 ap->ops->port_disable(ap);
5067 ata_host_remove(ap, 0);
5068 for (i = 0; i < ATA_MAX_DEVICES; i++)
5069 kfree(ap->device[i].id);
5070
5071 DPRINTK("EXIT\n");
5072 return 1;
5073 }
5074
5075 /**
5076 * ata_std_ports - initialize ioaddr with standard port offsets.
5077 * @ioaddr: IO address structure to be initialized
5078 *
5079 * Utility function which initializes data_addr, error_addr,
5080 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5081 * device_addr, status_addr, and command_addr to standard offsets
5082 * relative to cmd_addr.
5083 *
5084 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5085 */
5086
5087 void ata_std_ports(struct ata_ioports *ioaddr)
5088 {
5089 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5090 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5091 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5092 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5093 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5094 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5095 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5096 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5097 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5098 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5099 }
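/*
 * For example, a driver probing the legacy primary channel would set
 * only cmd_addr and ctl_addr and let this helper derive the rest (the
 * addresses below are the standard legacy values; probe_ent stands for
 * whatever struct ata_probe_ent the caller is filling in):
 *
 *	ioaddr = &probe_ent->port[0];
 *	ioaddr->cmd_addr = 0x1f0;
 *	ioaddr->ctl_addr = 0x3f6;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr;
 *	ata_std_ports(ioaddr);
 *
 * bmdma_addr, if the controller has one, is set separately (typically
 * from PCI BAR4).
 */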
5100
5101
5102 #ifdef CONFIG_PCI
5103
5104 void ata_pci_host_stop (struct ata_host_set *host_set)
5105 {
5106 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5107
5108 pci_iounmap(pdev, host_set->mmio_base);
5109 }
5110
5111 /**
5112 * ata_pci_remove_one - PCI layer callback for device removal
5113 * @pdev: PCI device that was removed
5114 *
5115 * PCI layer indicates to libata via this hook that
5116 * a hot-unplug or module unload event has occurred.
5117 * Handle this by unregistering all objects associated
5118 * with this PCI device. Free those objects. Then finally
5119 * release PCI resources and disable device.
5120 *
5121 * LOCKING:
5122 * Inherited from PCI layer (may sleep).
5123 */
5124
5125 void ata_pci_remove_one (struct pci_dev *pdev)
5126 {
5127 struct device *dev = pci_dev_to_dev(pdev);
5128 struct ata_host_set *host_set = dev_get_drvdata(dev);
5129
5130 ata_host_set_remove(host_set);
5131 pci_release_regions(pdev);
5132 pci_disable_device(pdev);
5133 dev_set_drvdata(dev, NULL);
5134 }
5135
5136 /* move to PCI subsystem */
5137 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5138 {
5139 unsigned long tmp = 0;
5140
5141 switch (bits->width) {
5142 case 1: {
5143 u8 tmp8 = 0;
5144 pci_read_config_byte(pdev, bits->reg, &tmp8);
5145 tmp = tmp8;
5146 break;
5147 }
5148 case 2: {
5149 u16 tmp16 = 0;
5150 pci_read_config_word(pdev, bits->reg, &tmp16);
5151 tmp = tmp16;
5152 break;
5153 }
5154 case 4: {
5155 u32 tmp32 = 0;
5156 pci_read_config_dword(pdev, bits->reg, &tmp32);
5157 tmp = tmp32;
5158 break;
5159 }
5160
5161 default:
5162 return -EINVAL;
5163 }
5164
5165 tmp &= bits->mask;
5166
5167 return (tmp == bits->val) ? 1 : 0;
5168 }
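/*
 * Typical use is to check a channel-enable bit in PCI config space
 * before probing a port.  The register offset, width, mask and value
 * below are purely illustrative, not those of any real controller:
 *
 *	static const struct pci_bits example_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &example_enable_bits[port_no]))
 *		return -ENODEV;
 *
 * A return of 1 means the masked bits match the expected value, 0 means
 * they do not, and -EINVAL means the width was not 1, 2 or 4 bytes.
 */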
5169
5170 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5171 {
5172 pci_save_state(pdev);
5173 pci_disable_device(pdev);
5174 pci_set_power_state(pdev, PCI_D3hot);
5175 return 0;
5176 }
5177
5178 int ata_pci_device_resume(struct pci_dev *pdev)
5179 {
5180 pci_set_power_state(pdev, PCI_D0);
5181 pci_restore_state(pdev);
5182 pci_enable_device(pdev);
5183 pci_set_master(pdev);
5184 return 0;
5185 }
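/*
 * In the common case where no controller-specific suspend work is
 * needed, these two helpers can be wired directly into a low-level
 * driver's struct pci_driver, alongside ata_pci_remove_one().
 * example_pci_tbl and example_init_one() (which would usually call
 * ata_pci_init_one()) are hypothetical:
 *
 *	static struct pci_driver example_pci_driver = {
 *		.name		= "example",
 *		.id_table	= example_pci_tbl,
 *		.probe		= example_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */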
5186 #endif /* CONFIG_PCI */
5187
5188
5189 static int __init ata_init(void)
5190 {
5191 ata_wq = create_workqueue("ata");
5192 if (!ata_wq)
5193 return -ENOMEM;
5194
5195 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5196 return 0;
5197 }
5198
5199 static void __exit ata_exit(void)
5200 {
5201 destroy_workqueue(ata_wq);
5202 }
5203
5204 module_init(ata_init);
5205 module_exit(ata_exit);
5206
5207 static unsigned long ratelimit_time;
5208 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5209
5210 int ata_ratelimit(void)
5211 {
5212 int rc;
5213 unsigned long flags;
5214
5215 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5216
5217 if (time_after(jiffies, ratelimit_time)) {
5218 rc = 1;
5219 ratelimit_time = jiffies + (HZ/5);
5220 } else
5221 rc = 0;
5222
5223 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5224
5225 return rc;
5226 }
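/*
 * ata_ratelimit() lets hot paths such as interrupt handlers throttle
 * warning messages to at most one every HZ/5 jiffies (a fifth of a
 * second).  Typical use:
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */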
5227
5228 /*
5229 * libata is essentially a library of internal helper functions for
5230 * low-level ATA host controller drivers. As such, the API/ABI is
5231 * likely to change as new drivers are added and updated.
5232 * Do not depend on ABI/API stability.
5233 */
5234
5235 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5236 EXPORT_SYMBOL_GPL(ata_std_ports);
5237 EXPORT_SYMBOL_GPL(ata_device_add);
5238 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5239 EXPORT_SYMBOL_GPL(ata_sg_init);
5240 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5241 EXPORT_SYMBOL_GPL(__ata_qc_complete);
5242 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5243 EXPORT_SYMBOL_GPL(ata_tf_load);
5244 EXPORT_SYMBOL_GPL(ata_tf_read);
5245 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5246 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5247 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5248 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5249 EXPORT_SYMBOL_GPL(ata_check_status);
5250 EXPORT_SYMBOL_GPL(ata_altstatus);
5251 EXPORT_SYMBOL_GPL(ata_exec_command);
5252 EXPORT_SYMBOL_GPL(ata_port_start);
5253 EXPORT_SYMBOL_GPL(ata_port_stop);
5254 EXPORT_SYMBOL_GPL(ata_host_stop);
5255 EXPORT_SYMBOL_GPL(ata_interrupt);
5256 EXPORT_SYMBOL_GPL(ata_qc_prep);
5257 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5258 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5259 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5260 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5261 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5262 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5263 EXPORT_SYMBOL_GPL(ata_port_probe);
5264 EXPORT_SYMBOL_GPL(sata_phy_reset);
5265 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5266 EXPORT_SYMBOL_GPL(ata_bus_reset);
5267 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5268 EXPORT_SYMBOL_GPL(ata_std_softreset);
5269 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5270 EXPORT_SYMBOL_GPL(ata_std_postreset);
5271 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5272 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5273 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5274 EXPORT_SYMBOL_GPL(ata_dev_classify);
5275 EXPORT_SYMBOL_GPL(ata_dev_pair);
5276 EXPORT_SYMBOL_GPL(ata_port_disable);
5277 EXPORT_SYMBOL_GPL(ata_ratelimit);
5278 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5279 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5280 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5281 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5282 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5283 EXPORT_SYMBOL_GPL(ata_scsi_release);
5284 EXPORT_SYMBOL_GPL(ata_host_intr);
5285 EXPORT_SYMBOL_GPL(ata_id_string);
5286 EXPORT_SYMBOL_GPL(ata_id_c_string);
5287 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5288
5289 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5290 EXPORT_SYMBOL_GPL(ata_timing_compute);
5291 EXPORT_SYMBOL_GPL(ata_timing_merge);
5292
5293 #ifdef CONFIG_PCI
5294 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5295 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5296 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5297 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5298 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5299 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5300 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5301 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5302 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5303 #endif /* CONFIG_PCI */
5304
5305 EXPORT_SYMBOL_GPL(ata_device_suspend);
5306 EXPORT_SYMBOL_GPL(ata_device_resume);
5307 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5308 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5309
5310 EXPORT_SYMBOL_GPL(ata_scsi_error);
5311 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5312 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5313 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);