/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
static unsigned int ata_dev_xfermask(struct ata_port *ap,
				     struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
						   bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

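	/* DWORD 4 (bytes 16-19) of the 20-byte Register - Host to
	 * Device FIS is reserved, so it is simply zeroed.
	 */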
	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;

	int index, fua, lba48, write;

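	/* ata_rw_cmds[] is laid out as three 8-entry blocks (PIO multi,
	 * PIO, DMA); within each block the entry is selected by the
	 * write (bit 0), LBA48 (bit 1) and FUA (bit 2) flag bits
	 * computed below.
	 */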
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

static const struct ata_xfer_ent {
	int shift, bits;	/* must be signed: the table below is
				   terminated by shift == -1, and the
				   lookup loops test ent->shift >= 0 */
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we check only those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
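	/* diagnostic code 0x01 means the device passed; 0x81 reported
	 * by device 0 means device 0 passed but device 1 failed
	 */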
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

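/* Capacity lives in different IDENTIFY words depending on the
 * addressing scheme: words 100-103 (LBA48), words 60-61 (LBA28),
 * words 57-58 (current CHS), or the product of the default
 * geometry words 1, 3 and 6 as a last resort.
 */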
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual operation.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
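		/* Word 64 bits 0-1 advertise PIO3/PIO4 support; shift
		 * them to mask positions 3-4 and OR in PIO0-2, which
		 * every device supports.
		 */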
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds, not the modes, that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data value to pass to workqueue function
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user's (low level driver's) responsibility to make sure that
 *	only one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("EXIT\n");
}

void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	qc->ap->ops->tf_read(qc->ap, &qc->tf);
	complete(waiting);
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@ap: Port to which the command is sent
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 */

static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
		  struct ata_taskfile *tf,
		  int dma_dir, void *buf, unsigned int buflen)
{
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(&ap->host_set->lock, flags);

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf = *tf;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	qc->err_mask = ata_qc_issue(qc);
	if (qc->err_mask)
		ata_qc_complete(qc);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
		ata_port_flush_task(ap);

		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * again.  If completion irq occurs after here but
		 * before the caller cleans up, it will result in a
		 * spurious interrupt.  We can live with that.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask = AC_ERR_TIMEOUT;
			ata_qc_complete(qc);
			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
			       ap->id, command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	*tf = qc->tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);

	return err_mask;
}

/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@ap: port on which target device resides
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@p_id: read IDENTIFY page (newly allocated)
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also takes care of EDD signature
 *	misreporting (to be removed once EDD support is gone) and
 *	issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
			   unsigned int *p_class, int post_reset, u16 **p_id)
{
	unsigned int class = *p_class;
	unsigned int using_edd;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	u16 *id;
	const char *reason;
	int rc;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	if (ap->ops->probe_reset ||
	    ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
	if (id == NULL) {
		rc = -ENOMEM;
		reason = "out of memory";
		goto err_out;
	}

 retry:
	ata_tf_init(ap, &tf, dev->devno);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);

	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";

		if (err_mask & ~AC_ERR_DEV)
			goto err_out;

		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (class == ATA_DEV_ATA)) {
			u8 err = tf.feature;
			if (err & ATA_ABORTED) {
				class = ATA_DEV_ATAPI;
				goto retry;
			}
		}
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(ap, dev);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  Reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;
	*p_id = id;
	return 0;

 err_out:
	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
	       ap->id, dev->devno, reason);
	kfree(id);
	return rc;
}

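/* A "knobbled" device sits behind a SATA<->PATA bridge: the port's
 * cable type is SATA but the device's IDENTIFY data does not report
 * SATA, so transfers get capped to bridge-safe limits at the call
 * site below.
 */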
static inline u8 ata_dev_knobble(const struct ata_port *ap,
				 struct ata_device *dev)
{
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@ap: Port on which target device resides
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
			     int print_info)
{
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, dev->devno);
		return 0;
	}

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* print device capabilities */
	if (print_info)
		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
		       ap->id, dev->devno, id[49], id[82], id[83],
		       id[84], id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags = 0;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require DMA support (bit 8 of word 49) */
	if (!ata_id_has_dma(id)) {
		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
		rc = -EINVAL;
		goto err_out_nosup;
	}

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: %s\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       lba_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: CHS %u/%u/%u\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       dev->cylinders, dev->heads, dev->sectors);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* print device info to dmesg */
		if (print_info)
			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
			       ap->id, dev->devno, ata_mode_string(xfer_mask));
	}

	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap, dev)) {
		if (print_info)
			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
			       ap->id, dev->devno);
		ap->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return 0;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, dev->devno);
	DPRINTK("EXIT, err\n");
	return rc;
}

/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	unsigned int i, rc, found = 0;

	ata_port_probe(ap);

	/* reset and determine device classes */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	if (ap->ops->probe_reset) {
		rc = ap->ops->probe_reset(ap, classes);
		if (rc) {
			printk(KERN_ERR "ata%u: reset failed (errno=%d)\n",
			       ap->id, rc);
			return rc;
		}
	} else {
		ap->ops->phy_reset(ap);

		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				classes[i] = ap->device[i].class;

		ata_port_probe(ap);
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] == ATA_DEV_UNKNOWN)
			classes[i] = ATA_DEV_NONE;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		dev->class = classes[i];

		if (!ata_dev_present(dev))
			continue;

		WARN_ON(dev->id != NULL);
		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
			dev->class = ATA_DEV_NONE;
			continue;
		}

		if (ata_dev_configure(ap, dev, 1)) {
			dev->class++;	/* disable device */
			continue;
		}

		found = 1;
	}

	if (!found)
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
	return -1;
}

/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, tmp;
	const char *speed;

	if (!ap->ops->scr_read)
		return;

	sstatus = scr_read(ap, SCR_STATUS);

	if (sata_dev_present(ap)) {
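		/* SStatus bits 7:4 (the SPD field) report the
		 * negotiated interface speed
		 */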
		tmp = (sstatus >> 4) & 0xf;
		if (tmp & (1 << 0))
			speed = "1.5";
		else if (tmp & (1 << 1))
			speed = "3.0";
		else
			speed = "<unknown>";
		printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
		       ap->id, speed, sstatus);
	} else {
		printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
		       ap->id, sstatus);
	}
}

/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};

#define ENOUGH(v,unit)	(((v)-1)/(unit)+1)
#define EZ(v,unit)	((v)?ENOUGH(v,unit):0)
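
/* ENOUGH() rounds v up to a whole number of 'unit's; EZ() does the
 * same but keeps a zero ("not specified") timing value at zero.
 */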

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}

static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	if (ata_dev_revalidate(ap, dev, 0)) {
		printk(KERN_ERR "ata%u: failed to revalidate after set "
		       "xfermode, disabled\n", ap->id);
		ata_port_disable(ap);
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno,
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
}

static int ata_host_set_pio(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev))
			continue;

		if (!dev->pio_mode) {
			printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
			return -1;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	return 0;
}

static void ata_host_set_dma(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
	int i, rc;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned int xfer_mask;

		if (!ata_dev_present(dev))
			continue;

		xfer_mask = ata_dev_xfermask(ap, dev);

		dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
		dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
								ATA_MASK_UDMA));
	}

	/* step 2: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 3: set host DMA timings */
	ata_host_set_dma(ap);

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_set_mode(ap, &ap->device[i]);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	return;

err_out:
	ata_port_disable(ap);
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING: None.
 */

unsigned int ata_busy_sleep(struct ata_port *ap,
			    unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

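	/* poll in two phases: wait up to @tmout_pat, warn if the
	 * device is still busy, then keep polling up to the overall
	 * @tmout before giving up
	 */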
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}

static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
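		/* a present device posts the post-reset signature value
		 * 0x01 in both the sector count and LBA low registers
		 * once register access works
		 */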
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

/**
 *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
 *	@ap: Port to reset and probe
 *
 *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
 *	probe the bus.  Not often used these days.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;
	unsigned long flags;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_tf_to_host(ap, &tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	msleep(150);

	ata_bus_post_reset(ap, devmask);

	return 0;
}

/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2112
2113 static int sata_phy_resume(struct ata_port *ap)
2114 {
2115 unsigned long timeout = jiffies + (HZ * 5);
2116 u32 sstatus;
2117
2118 scr_write_flush(ap, SCR_CONTROL, 0x300);
2119
2120 /* Wait for phy to become ready, if necessary. */
2121 do {
2122 msleep(200);
2123 sstatus = scr_read(ap, SCR_STATUS);
2124 if ((sstatus & 0xf) != 1)
2125 return 0;
2126 } while (time_before(jiffies, timeout));
2127
2128 return -1;
2129 }
2130
2131 /**
2132 * ata_std_probeinit - initialize probing
2133 * @ap: port to be probed
2134 *
2135 * @ap is about to be probed. Initialize it. This function is
2136 * to be used as standard callback for ata_drive_probe_reset().
2137 *
2138 * NOTE!!! Do not use this function as probeinit if a low level
2139 * driver implements only hardreset. Just pass NULL as probeinit
2140 * in that case. Using this function is probably okay but doing
2141 * so makes reset sequence different from the original
2142 * ->phy_reset implementation and Jeff nervous. :-P
2143 */
2144 void ata_std_probeinit(struct ata_port *ap)
2145 {
2146 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2147 sata_phy_resume(ap);
2148 if (sata_dev_present(ap))
2149 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2150 }
2151 }
2152
2153 /**
2154 * ata_std_softreset - reset host port via ATA SRST
2155 * @ap: port to reset
2156 * @verbose: fail verbosely
2157 * @classes: resulting classes of attached devices
2158 *
2159 * Reset host port using ATA SRST. This function is to be used
2160 * as standard callback for ata_drive_*_reset() functions.
2161 *
2162 * LOCKING:
2163 * Kernel thread context (may sleep)
2164 *
2165 * RETURNS:
2166 * 0 on success, -errno otherwise.
2167 */
2168 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2169 {
2170 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2171 unsigned int devmask = 0, err_mask;
2172 u8 err;
2173
2174 DPRINTK("ENTER\n");
2175
2176 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2177 classes[0] = ATA_DEV_NONE;
2178 goto out;
2179 }
2180
2181 /* determine if device 0/1 are present */
2182 if (ata_devchk(ap, 0))
2183 devmask |= (1 << 0);
2184 if (slave_possible && ata_devchk(ap, 1))
2185 devmask |= (1 << 1);
2186
2187 /* select device 0 again */
2188 ap->ops->dev_select(ap, 0);
2189
2190 /* issue bus reset */
2191 DPRINTK("about to softreset, devmask=%x\n", devmask);
2192 err_mask = ata_bus_softreset(ap, devmask);
2193 if (err_mask) {
2194 if (verbose)
2195 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2196 ap->id, err_mask);
2197 else
2198 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2199 err_mask);
2200 return -EIO;
2201 }
2202
2203 /* determine by signature whether we have ATA or ATAPI devices */
2204 classes[0] = ata_dev_try_classify(ap, 0, &err);
2205 if (slave_possible && err != 0x81)
2206 classes[1] = ata_dev_try_classify(ap, 1, &err);
2207
2208 out:
2209 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2210 return 0;
2211 }
2212
2213 /**
2214 * sata_std_hardreset - reset host port via SATA phy reset
2215 * @ap: port to reset
2216 * @verbose: fail verbosely
2217 * @class: resulting class of attached device
2218 *
2219 * SATA phy-reset host port using DET bits of SControl register.
2220 * This function is to be used as standard callback for
2221 * ata_drive_*_reset().
2222 *
2223 * LOCKING:
2224 * Kernel thread context (may sleep)
2225 *
2226 * RETURNS:
2227 * 0 on success, -errno otherwise.
2228 */
2229 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2230 {
2231 DPRINTK("ENTER\n");
2232
2233 /* Issue phy wake/reset */
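/* SControl 0x301: DET = 0x1 asserts COMRESET, while IPM = 0x3
 * forbids transitions into the partial/slumber power states;
 * sata_phy_resume() below releases DET again by writing 0x300.
 */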
2234 scr_write_flush(ap, SCR_CONTROL, 0x301);
2235
2236 /*
2237 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2238 * 10.4.2 says at least 1 ms.
2239 */
2240 msleep(1);
2241
2242 /* Bring phy back */
2243 sata_phy_resume(ap);
2244
2245 /* TODO: phy layer with polling, timeouts, etc. */
2246 if (!sata_dev_present(ap)) {
2247 *class = ATA_DEV_NONE;
2248 DPRINTK("EXIT, link offline\n");
2249 return 0;
2250 }
2251
2252 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2253 if (verbose)
2254 printk(KERN_ERR "ata%u: COMRESET failed "
2255 "(device not ready)\n", ap->id);
2256 else
2257 DPRINTK("EXIT, device not ready\n");
2258 return -EIO;
2259 }
2260
2261 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2262
2263 *class = ata_dev_try_classify(ap, 0, NULL);
2264
2265 DPRINTK("EXIT, class=%u\n", *class);
2266 return 0;
2267 }
2268
2269 /**
2270 * ata_std_postreset - standard postreset callback
2271 * @ap: the target ata_port
2272 * @classes: classes of attached devices
2273 *
2274 * This function is invoked after a successful reset. Note that
2275 * the device might have been reset more than once using
2276 * different reset methods before postreset is invoked.
2277 *
2278 * This function is to be used as standard callback for
2279 * ata_drive_*_reset().
2280 *
2281 * LOCKING:
2282 * Kernel thread context (may sleep)
2283 */
2284 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2285 {
2286 DPRINTK("ENTER\n");
2287
2288 /* set cable type if it isn't already set */
2289 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2290 ap->cbl = ATA_CBL_SATA;
2291
2292 /* print link status */
2293 if (ap->cbl == ATA_CBL_SATA)
2294 sata_print_link_status(ap);
2295
2296 /* re-enable interrupts */
2297 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2298 ata_irq_on(ap);
2299
2300 /* is double-select really necessary? */
2301 if (classes[1] != ATA_DEV_NONE)
2302 ap->ops->dev_select(ap, 1);
2303 if (classes[0] != ATA_DEV_NONE)
2304 ap->ops->dev_select(ap, 0);
2305
2306 /* bail out if no device is present */
2307 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2308 DPRINTK("EXIT, no device\n");
2309 return;
2310 }
2311
2312 /* set up device control */
2313 if (ap->ioaddr.ctl_addr) {
2314 if (ap->flags & ATA_FLAG_MMIO)
2315 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2316 else
2317 outb(ap->ctl, ap->ioaddr.ctl_addr);
2318 }
2319
2320 DPRINTK("EXIT\n");
2321 }
2322
2323 /**
2324 * ata_std_probe_reset - standard probe reset method
2325 * @ap: port on which to perform probe-reset
2326 * @classes: resulting classes of attached devices
2327 *
2328 * The stock off-the-shelf ->probe_reset method.
2329 *
2330 * LOCKING:
2331 * Kernel thread context (may sleep)
2332 *
2333 * RETURNS:
2334 * 0 on success, -errno otherwise.
2335 */
2336 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2337 {
2338 ata_reset_fn_t hardreset;
2339
2340 hardreset = NULL;
2341 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2342 hardreset = sata_std_hardreset;
2343
2344 return ata_drive_probe_reset(ap, ata_std_probeinit,
2345 ata_std_softreset, hardreset,
2346 ata_std_postreset, classes);
2347 }
2348
2349 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2350 ata_postreset_fn_t postreset,
2351 unsigned int *classes)
2352 {
2353 int i, rc;
2354
2355 for (i = 0; i < ATA_MAX_DEVICES; i++)
2356 classes[i] = ATA_DEV_UNKNOWN;
2357
2358 rc = reset(ap, 0, classes);
2359 if (rc)
2360 return rc;
2361
2362 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2363 * complete and convert all remaining ATA_DEV_UNKNOWN entries
2364 * to ATA_DEV_NONE.
2365 */
2366 for (i = 0; i < ATA_MAX_DEVICES; i++)
2367 if (classes[i] != ATA_DEV_UNKNOWN)
2368 break;
2369
2370 if (i < ATA_MAX_DEVICES)
2371 for (i = 0; i < ATA_MAX_DEVICES; i++)
2372 if (classes[i] == ATA_DEV_UNKNOWN)
2373 classes[i] = ATA_DEV_NONE;
2374
2375 if (postreset)
2376 postreset(ap, classes);
2377
2378 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2379 }
2380
2381 /**
2382 * ata_drive_probe_reset - Perform probe reset with given methods
2383 * @ap: port to reset
2384 * @probeinit: probeinit method (can be NULL)
2385 * @softreset: softreset method (can be NULL)
2386 * @hardreset: hardreset method (can be NULL)
2387 * @postreset: postreset method (can be NULL)
2388 * @classes: resulting classes of attached devices
2389 *
2390 * Reset the specified port and classify attached devices using
2391 * given methods. This function prefers softreset but tries all
2392 * possible reset sequences to reset and classify devices. This
2393 * function is intended to be used by low level drivers for
2394 * constructing their ->probe_reset callback.
2395 *
2396 * Reset methods must follow these rules:
2397 *
2398 * - Return 0 on success, -errno on failure.
2399 * - If classification is supported, fill classes[] with
2400 * recognized class codes.
2401 * - If classification is not supported, leave classes[] alone.
2402 * - If verbose is non-zero, print error message on failure;
2403 * otherwise, shut up.
2404 *
2405 * LOCKING:
2406 * Kernel thread context (may sleep)
2407 *
2408 * RETURNS:
2409 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2410 * if classification fails, and any error code from reset
2411 * methods.
2412 */
2413 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2414 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2415 ata_postreset_fn_t postreset, unsigned int *classes)
2416 {
2417 int rc = -EINVAL;
2418
2419 if (probeinit)
2420 probeinit(ap);
2421
2422 if (softreset) {
2423 rc = do_probe_reset(ap, softreset, postreset, classes);
2424 if (rc == 0)
2425 return 0;
2426 }
2427
2428 if (!hardreset)
2429 return rc;
2430
2431 rc = do_probe_reset(ap, hardreset, postreset, classes);
2432 if (rc == 0 || rc != -ENODEV)
2433 return rc;
2434
2435 if (softreset)
2436 rc = do_probe_reset(ap, softreset, postreset, classes);
2437
2438 return rc;
2439 }
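/*
 * Usage sketch, not part of libata: a low level driver that
 * implements only hardreset would build its ->probe_reset like
 * this, passing NULL as probeinit per the note above
 * ata_std_probeinit ("my_hardreset" is a hypothetical LLD method):
 *
 *	static int my_probe_reset(struct ata_port *ap,
 *				  unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, NULL, NULL,
 *					     my_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 */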
2440
2441 /**
2442 * ata_dev_same_device - Determine whether new ID matches configured device
2443 * @ap: port on which the device to compare against resides
2444 * @dev: device to compare against
2445 * @new_class: class of the new device
2446 * @new_id: IDENTIFY page of the new device
2447 *
2448 * Compare @new_class and @new_id against @dev and determine
2449 * whether @dev is the device indicated by @new_class and
2450 * @new_id.
2451 *
2452 * LOCKING:
2453 * None.
2454 *
2455 * RETURNS:
2456 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2457 */
2458 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2459 unsigned int new_class, const u16 *new_id)
2460 {
2461 const u16 *old_id = dev->id;
2462 unsigned char model[2][41], serial[2][21];
2463 u64 new_n_sectors;
2464
2465 if (dev->class != new_class) {
2466 printk(KERN_INFO
2467 "ata%u: dev %u class mismatch %d != %d\n",
2468 ap->id, dev->devno, dev->class, new_class);
2469 return 0;
2470 }
2471
2472 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2473 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2474 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2475 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2476 new_n_sectors = ata_id_n_sectors(new_id);
2477
2478 if (strcmp(model[0], model[1])) {
2479 printk(KERN_INFO
2480 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2481 ap->id, dev->devno, model[0], model[1]);
2482 return 0;
2483 }
2484
2485 if (strcmp(serial[0], serial[1])) {
2486 printk(KERN_INFO
2487 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2488 ap->id, dev->devno, serial[0], serial[1]);
2489 return 0;
2490 }
2491
2492 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2493 printk(KERN_INFO
2494 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2495 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2496 (unsigned long long)new_n_sectors);
2497 return 0;
2498 }
2499
2500 return 1;
2501 }
2502
2503 /**
2504 * ata_dev_revalidate - Revalidate ATA device
2505 * @ap: port on which the device to revalidate resides
2506 * @dev: device to revalidate
2507 * @post_reset: is this revalidation after reset?
2508 *
2509 * Re-read IDENTIFY page and make sure @dev is still attached to
2510 * the port.
2511 *
2512 * LOCKING:
2513 * Kernel thread context (may sleep)
2514 *
2515 * RETURNS:
2516 * 0 on success, negative errno otherwise
2517 */
2518 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2519 int post_reset)
2520 {
2521 unsigned int class;
2522 u16 *id;
2523 int rc;
2524
2525 if (!ata_dev_present(dev))
2526 return -ENODEV;
2527
2528 class = dev->class;
2529 id = NULL;
2530
2531 /* allocate & read ID data */
2532 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2533 if (rc)
2534 goto fail;
2535
2536 /* is the device still there? */
2537 if (!ata_dev_same_device(ap, dev, class, id)) {
2538 rc = -ENODEV;
2539 goto fail;
2540 }
2541
2542 kfree(dev->id);
2543 dev->id = id;
2544
2545 /* configure device according to the new ID */
2546 return ata_dev_configure(ap, dev, 0);
2547
2548 fail:
2549 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2550 ap->id, dev->devno, rc);
2551 kfree(id);
2552 return rc;
2553 }
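/*
 * Minimal caller sketch (assumed context, not a libata helper):
 * after a reset, revalidate with post_reset=1 and treat a nonzero
 * return as "the device is gone or was swapped":
 *
 *	if (ata_dev_revalidate(ap, dev, 1))
 *		ap->ops->port_disable(ap);
 */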
2554
2555 static const char * const ata_dma_blacklist[] = {
2556 "WDC AC11000H",
2557 "WDC AC22100H",
2558 "WDC AC32500H",
2559 "WDC AC33100H",
2560 "WDC AC31600H",
2561 "WDC AC32100H",
2562 "WDC AC23200L",
2563 "Compaq CRD-8241B",
2564 "CRD-8400B",
2565 "CRD-8480B",
2566 "CRD-8482B",
2567 "CRD-84",
2568 "SanDisk SDP3B",
2569 "SanDisk SDP3B-64",
2570 "SANYO CD-ROM CRD",
2571 "HITACHI CDR-8",
2572 "HITACHI CDR-8335",
2573 "HITACHI CDR-8435",
2574 "Toshiba CD-ROM XM-6202B",
2575 "TOSHIBA CD-ROM XM-1702BC",
2576 "CD-532E-A",
2577 "E-IDE CD-ROM CR-840",
2578 "CD-ROM Drive/F5A",
2579 "WPI CDD-820",
2580 "SAMSUNG CD-ROM SC-148C",
2581 "SAMSUNG CD-ROM SC",
2582 "SanDisk SDP3B-64",
2583 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
2584 "_NEC DV5800A",
2585 };
2586
2587 static int ata_dma_blacklisted(const struct ata_device *dev)
2588 {
2589 unsigned char model_num[41];
2590 int i;
2591
2592 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
2593
2594 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2595 if (!strcmp(ata_dma_blacklist[i], model_num))
2596 return 1;
2597
2598 return 0;
2599 }
2600
2601 /**
2602 * ata_dev_xfermask - Compute supported xfermask of the given device
2603 * @ap: Port on which the device to compute xfermask for resides
2604 * @dev: Device to compute xfermask for
2605 *
2606 * Compute supported xfermask of @dev. This function is
2607 * responsible for applying all known limits including host
2608 * controller limits, device blacklist, etc...
2609 *
2610 * LOCKING:
2611 * None.
2612 *
2613 * RETURNS:
2614 * Computed xfermask.
2615 */
2616 static unsigned int ata_dev_xfermask(struct ata_port *ap,
2617 struct ata_device *dev)
2618 {
2619 unsigned long xfer_mask;
2620 int i;
2621
2622 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2623 ap->udma_mask);
2624
2625 /* use port-wide xfermask for now */
2626 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2627 struct ata_device *d = &ap->device[i];
2628 if (!ata_dev_present(d))
2629 continue;
2630 xfer_mask &= ata_id_xfermask(d->id);
2631 if (ata_dma_blacklisted(d))
2632 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2633 }
2634
2635 if (ata_dma_blacklisted(dev))
2636 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2637 "disabling DMA\n", ap->id, dev->devno);
2638
2639 return xfer_mask;
2640 }
2641
2642 /**
2643 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2644 * @ap: Port associated with device @dev
2645 * @dev: Device to which command will be sent
2646 *
2647 * Issue SET FEATURES - XFER MODE command to device @dev
2648 * on port @ap.
2649 *
2650 * LOCKING:
2651 * PCI/etc. bus probe sem.
2652 */
2653
2654 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2655 {
2656 struct ata_taskfile tf;
2657
2658 /* set up set-features taskfile */
2659 DPRINTK("set features - xfer mode\n");
2660
2661 ata_tf_init(ap, &tf, dev->devno);
2662 tf.command = ATA_CMD_SET_FEATURES;
2663 tf.feature = SETFEATURES_XFER;
2664 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2665 tf.protocol = ATA_PROT_NODATA;
2666 tf.nsect = dev->xfer_mode;
2667
2668 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2669 printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
2670 ap->id);
2671 ata_port_disable(ap);
2672 }
2673
2674 DPRINTK("EXIT\n");
2675 }
2676
2677 /**
2678 * ata_dev_init_params - Issue INIT DEV PARAMS command
2679 * @ap: Port associated with device @dev
2680 * @dev: Device to which command will be sent
2681 *
2682 * LOCKING:
2683 * Kernel thread context (may sleep)
2684 *
2685 * RETURNS:
2686 * 0 on success, AC_ERR_* mask otherwise.
2687 */
2688
2689 static unsigned int ata_dev_init_params(struct ata_port *ap,
2690 struct ata_device *dev)
2691 {
2692 struct ata_taskfile tf;
2693 unsigned int err_mask;
2694 u16 sectors = dev->id[6];
2695 u16 heads = dev->id[3];
2696
2697 /* Number of sectors per track 1-255. Number of heads 1-16 */
2698 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2699 return 0;
2700
2701 /* set up init dev params taskfile */
2702 DPRINTK("init dev params \n");
2703
2704 ata_tf_init(ap, &tf, dev->devno);
2705 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2706 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2707 tf.protocol = ATA_PROT_NODATA;
2708 tf.nsect = sectors;
2709 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2710
2711 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2712
2713 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2714 return err_mask;
2715 }
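/*
 * Worked example: a drive reporting 16 heads (ID word 3) and 63
 * sectors per track (ID word 6) is sent tf.nsect = 63 with the low
 * nibble of tf.device = 15, since the device register carries the
 * maximum head number, i.e. heads - 1.
 */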
2716
2717 /**
2718 * ata_sg_clean - Unmap DMA memory associated with command
2719 * @qc: Command containing DMA memory to be released
2720 *
2721 * Unmap all mapped DMA memory associated with this command.
2722 *
2723 * LOCKING:
2724 * spin_lock_irqsave(host_set lock)
2725 */
2726
2727 static void ata_sg_clean(struct ata_queued_cmd *qc)
2728 {
2729 struct ata_port *ap = qc->ap;
2730 struct scatterlist *sg = qc->__sg;
2731 int dir = qc->dma_dir;
2732 void *pad_buf = NULL;
2733
2734 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2735 WARN_ON(sg == NULL);
2736
2737 if (qc->flags & ATA_QCFLAG_SINGLE)
2738 WARN_ON(qc->n_elem > 1);
2739
2740 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2741
2742 /* if we padded the buffer out to a 32-bit boundary, and the data
2743 * xfer direction is from-device, we must copy from the
2744 * pad buffer back into the supplied buffer
2745 */
2746 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2747 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2748
2749 if (qc->flags & ATA_QCFLAG_SG) {
2750 if (qc->n_elem)
2751 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2752 /* restore last sg */
2753 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2754 if (pad_buf) {
2755 struct scatterlist *psg = &qc->pad_sgent;
2756 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2757 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2758 kunmap_atomic(addr, KM_IRQ0);
2759 }
2760 } else {
2761 if (qc->n_elem)
2762 dma_unmap_single(ap->host_set->dev,
2763 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2764 dir);
2765 /* restore sg */
2766 sg->length += qc->pad_len;
2767 if (pad_buf)
2768 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2769 pad_buf, qc->pad_len);
2770 }
2771
2772 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2773 qc->__sg = NULL;
2774 }
2775
2776 /**
2777 * ata_fill_sg - Fill PCI IDE PRD table
2778 * @qc: Metadata associated with taskfile to be transferred
2779 *
2780 * Fill PCI IDE PRD (scatter-gather) table with segments
2781 * associated with the current disk command.
2782 *
2783 * LOCKING:
2784 * spin_lock_irqsave(host_set lock)
2785 *
2786 */
2787 static void ata_fill_sg(struct ata_queued_cmd *qc)
2788 {
2789 struct ata_port *ap = qc->ap;
2790 struct scatterlist *sg;
2791 unsigned int idx;
2792
2793 WARN_ON(qc->__sg == NULL);
2794 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2795
2796 idx = 0;
2797 ata_for_each_sg(sg, qc) {
2798 u32 addr, offset;
2799 u32 sg_len, len;
2800
2801 /* determine if physical DMA addr spans 64K boundary.
2802 * Note h/w doesn't support 64-bit, so we unconditionally
2803 * truncate dma_addr_t to u32.
2804 */
2805 addr = (u32) sg_dma_address(sg);
2806 sg_len = sg_dma_len(sg);
2807
2808 while (sg_len) {
2809 offset = addr & 0xffff;
2810 len = sg_len;
2811 if ((offset + sg_len) > 0x10000)
2812 len = 0x10000 - offset;
2813
2814 ap->prd[idx].addr = cpu_to_le32(addr);
2815 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2816 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2817
2818 idx++;
2819 sg_len -= len;
2820 addr += len;
2821 }
2822 }
2823
2824 if (idx)
2825 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2826 }
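/*
 * Worked example of the 64K boundary split above: an S/G entry with
 * bus address 0x1fff0 and length 0x8000 starts at offset 0xfff0
 * within its 64K segment, so it is emitted as two PRD entries,
 * (0x1fff0, 0x10) and (0x20000, 0x7ff0), with ATA_PRD_EOT set on
 * whichever entry is written last.
 */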
2827 /**
2828 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2829 * @qc: Metadata associated with taskfile to check
2830 *
2831 * Allow low-level driver to filter ATA PACKET commands, returning
2832 * a status indicating whether or not it is OK to use DMA for the
2833 * supplied PACKET command.
2834 *
2835 * LOCKING:
2836 * spin_lock_irqsave(host_set lock)
2837 *
2838 * RETURNS: 0 when ATAPI DMA can be used
2839 * nonzero otherwise
2840 */
2841 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2842 {
2843 struct ata_port *ap = qc->ap;
2844 int rc = 0; /* Assume ATAPI DMA is OK by default */
2845
2846 if (ap->ops->check_atapi_dma)
2847 rc = ap->ops->check_atapi_dma(qc);
2848
2849 return rc;
2850 }
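/*
 * Sketch of a low level driver hook reached through
 * ->check_atapi_dma; the name and the policy below are purely
 * illustrative. Returning nonzero forces the PACKET command to PIO:
 *
 *	static int my_check_atapi_dma(struct ata_queued_cmd *qc)
 *	{
 *		return (qc->nbytes & 3) ? 1 : 0;
 *	}
 */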
2851 /**
2852 * ata_qc_prep - Prepare taskfile for submission
2853 * @qc: Metadata associated with taskfile to be prepared
2854 *
2855 * Prepare ATA taskfile for submission.
2856 *
2857 * LOCKING:
2858 * spin_lock_irqsave(host_set lock)
2859 */
2860 void ata_qc_prep(struct ata_queued_cmd *qc)
2861 {
2862 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2863 return;
2864
2865 ata_fill_sg(qc);
2866 }
2867
2868 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2869
2870 /**
2871 * ata_sg_init_one - Associate command with memory buffer
2872 * @qc: Command to be associated
2873 * @buf: Memory buffer
2874 * @buflen: Length of memory buffer, in bytes.
2875 *
2876 * Initialize the data-related elements of queued_cmd @qc
2877 * to point to a single memory buffer, @buf of byte length @buflen.
2878 *
2879 * LOCKING:
2880 * spin_lock_irqsave(host_set lock)
2881 */
2882
2883 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2884 {
2885 struct scatterlist *sg;
2886
2887 qc->flags |= ATA_QCFLAG_SINGLE;
2888
2889 memset(&qc->sgent, 0, sizeof(qc->sgent));
2890 qc->__sg = &qc->sgent;
2891 qc->n_elem = 1;
2892 qc->orig_n_elem = 1;
2893 qc->buf_virt = buf;
2894
2895 sg = qc->__sg;
2896 sg_init_one(sg, buf, buflen);
2897 }
2898
2899 /**
2900 * ata_sg_init - Associate command with scatter-gather table.
2901 * @qc: Command to be associated
2902 * @sg: Scatter-gather table.
2903 * @n_elem: Number of elements in s/g table.
2904 *
2905 * Initialize the data-related elements of queued_cmd @qc
2906 * to point to a scatter-gather table @sg, containing @n_elem
2907 * elements.
2908 *
2909 * LOCKING:
2910 * spin_lock_irqsave(host_set lock)
2911 */
2912
2913 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2914 unsigned int n_elem)
2915 {
2916 qc->flags |= ATA_QCFLAG_SG;
2917 qc->__sg = sg;
2918 qc->n_elem = n_elem;
2919 qc->orig_n_elem = n_elem;
2920 }
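/*
 * Usage sketch (surrounding qc setup elided): bind a single kernel
 * buffer to the qc and record the transfer direction before the
 * command is issued:
 *
 *	ata_sg_init_one(qc, buf, buflen);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 */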
2921
2922 /**
2923 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2924 * @qc: Command with memory buffer to be mapped.
2925 *
2926 * DMA-map the memory buffer associated with queued_cmd @qc.
2927 *
2928 * LOCKING:
2929 * spin_lock_irqsave(host_set lock)
2930 *
2931 * RETURNS:
2932 * Zero on success, negative on error.
2933 */
2934
2935 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2936 {
2937 struct ata_port *ap = qc->ap;
2938 int dir = qc->dma_dir;
2939 struct scatterlist *sg = qc->__sg;
2940 dma_addr_t dma_address;
2941 int trim_sg = 0;
2942
2943 /* we must lengthen transfers to end on a 32-bit boundary */
2944 qc->pad_len = sg->length & 3;
2945 if (qc->pad_len) {
2946 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2947 struct scatterlist *psg = &qc->pad_sgent;
2948
2949 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2950
2951 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2952
2953 if (qc->tf.flags & ATA_TFLAG_WRITE)
2954 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
2955 qc->pad_len);
2956
2957 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2958 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2959 /* trim sg */
2960 sg->length -= qc->pad_len;
2961 if (sg->length == 0)
2962 trim_sg = 1;
2963
2964 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
2965 sg->length, qc->pad_len);
2966 }
2967
2968 if (trim_sg) {
2969 qc->n_elem--;
2970 goto skip_map;
2971 }
2972
2973 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2974 sg->length, dir);
2975 if (dma_mapping_error(dma_address)) {
2976 /* restore sg */
2977 sg->length += qc->pad_len;
2978 return -1;
2979 }
2980
2981 sg_dma_address(sg) = dma_address;
2982 sg_dma_len(sg) = sg->length;
2983
2984 skip_map:
2985 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2986 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2987
2988 return 0;
2989 }
2990
2991 /**
2992 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
2993 * @qc: Command with scatter-gather table to be mapped.
2994 *
2995 * DMA-map the scatter-gather table associated with queued_cmd @qc.
2996 *
2997 * LOCKING:
2998 * spin_lock_irqsave(host_set lock)
2999 *
3000 * RETURNS:
3001 * Zero on success, negative on error.
3002 *
3003 */
3004
3005 static int ata_sg_setup(struct ata_queued_cmd *qc)
3006 {
3007 struct ata_port *ap = qc->ap;
3008 struct scatterlist *sg = qc->__sg;
3009 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3010 int n_elem, pre_n_elem, dir, trim_sg = 0;
3011
3012 VPRINTK("ENTER, ata%u\n", ap->id);
3013 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3014
3015 /* we must lengthen transfers to end on a 32-bit boundary */
3016 qc->pad_len = lsg->length & 3;
3017 if (qc->pad_len) {
3018 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3019 struct scatterlist *psg = &qc->pad_sgent;
3020 unsigned int offset;
3021
3022 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3023
3024 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3025
3026 /*
3027 * psg->page/offset are used to copy to-be-written
3028 * data in this function or read data in ata_sg_clean.
3029 */
3030 offset = lsg->offset + lsg->length - qc->pad_len;
3031 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3032 psg->offset = offset_in_page(offset);
3033
3034 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3035 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3036 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3037 kunmap_atomic(addr, KM_IRQ0);
3038 }
3039
3040 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3041 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3042 /* trim last sg */
3043 lsg->length -= qc->pad_len;
3044 if (lsg->length == 0)
3045 trim_sg = 1;
3046
3047 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3048 qc->n_elem - 1, lsg->length, qc->pad_len);
3049 }
3050
3051 pre_n_elem = qc->n_elem;
3052 if (trim_sg && pre_n_elem)
3053 pre_n_elem--;
3054
3055 if (!pre_n_elem) {
3056 n_elem = 0;
3057 goto skip_map;
3058 }
3059
3060 dir = qc->dma_dir;
3061 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
3062 if (n_elem < 1) {
3063 /* restore last sg */
3064 lsg->length += qc->pad_len;
3065 return -1;
3066 }
3067
3068 DPRINTK("%d sg elements mapped\n", n_elem);
3069
3070 skip_map:
3071 qc->n_elem = n_elem;
3072
3073 return 0;
3074 }
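/*
 * Padding example for the code above: a final S/G element of 510
 * bytes yields pad_len = 2. For a write, the last two bytes are
 * copied into the 4-byte pad buffer (remainder zeroed), the element
 * is trimmed to 508 bytes, and pad_sgent supplies the trailing
 * ATA_DMA_PAD_SZ bytes, so every DMA segment ends on a 32-bit
 * boundary. For a read, ata_sg_clean() copies the two bytes back.
 */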
3075
3076 /**
3077 * ata_poll_qc_complete - turn irq back on and finish qc
3078 * @qc: Command to complete
3080 *
3081 * LOCKING:
3082 * None. (grabs host lock)
3083 */
3084
3085 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3086 {
3087 struct ata_port *ap = qc->ap;
3088 unsigned long flags;
3089
3090 spin_lock_irqsave(&ap->host_set->lock, flags);
3091 ap->flags &= ~ATA_FLAG_NOINTR;
3092 ata_irq_on(ap);
3093 ata_qc_complete(qc);
3094 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3095 }
3096
3097 /**
3098 * ata_pio_poll - poll using PIO, depending on current state
3099 * @ap: the target ata_port
3100 *
3101 * LOCKING:
3102 * None. (executing in kernel thread context)
3103 *
3104 * RETURNS:
3105 * timeout value to use
3106 */
3107
3108 static unsigned long ata_pio_poll(struct ata_port *ap)
3109 {
3110 struct ata_queued_cmd *qc;
3111 u8 status;
3112 unsigned int poll_state = HSM_ST_UNKNOWN;
3113 unsigned int reg_state = HSM_ST_UNKNOWN;
3114
3115 qc = ata_qc_from_tag(ap, ap->active_tag);
3116 WARN_ON(qc == NULL);
3117
3118 switch (ap->hsm_task_state) {
3119 case HSM_ST:
3120 case HSM_ST_POLL:
3121 poll_state = HSM_ST_POLL;
3122 reg_state = HSM_ST;
3123 break;
3124 case HSM_ST_LAST:
3125 case HSM_ST_LAST_POLL:
3126 poll_state = HSM_ST_LAST_POLL;
3127 reg_state = HSM_ST_LAST;
3128 break;
3129 default:
3130 BUG();
3131 break;
3132 }
3133
3134 status = ata_chk_status(ap);
3135 if (status & ATA_BUSY) {
3136 if (time_after(jiffies, ap->pio_task_timeout)) {
3137 qc->err_mask |= AC_ERR_TIMEOUT;
3138 ap->hsm_task_state = HSM_ST_TMOUT;
3139 return 0;
3140 }
3141 ap->hsm_task_state = poll_state;
3142 return ATA_SHORT_PAUSE;
3143 }
3144
3145 ap->hsm_task_state = reg_state;
3146 return 0;
3147 }
3148
3149 /**
3150 * ata_pio_complete - check if drive is busy or idle
3151 * @ap: the target ata_port
3152 *
3153 * LOCKING:
3154 * None. (executing in kernel thread context)
3155 *
3156 * RETURNS:
3157 * Non-zero if qc completed, zero otherwise.
3158 */
3159
3160 static int ata_pio_complete (struct ata_port *ap)
3161 {
3162 struct ata_queued_cmd *qc;
3163 u8 drv_stat;
3164
3165 /*
3166 * This is purely heuristic. This is a fast path. Sometimes when
3167 * we enter, BSY will be cleared in a chk-status or two. If not,
3168 * the drive is probably seeking or something. Snooze for a couple
3169 * msecs, then chk-status again. If still busy, fall back to
3170 * HSM_ST_POLL state.
3171 */
3172 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3173 if (drv_stat & ATA_BUSY) {
3174 msleep(2);
3175 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3176 if (drv_stat & ATA_BUSY) {
3177 ap->hsm_task_state = HSM_ST_LAST_POLL;
3178 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3179 return 0;
3180 }
3181 }
3182
3183 qc = ata_qc_from_tag(ap, ap->active_tag);
3184 WARN_ON(qc == NULL);
3185
3186 drv_stat = ata_wait_idle(ap);
3187 if (!ata_ok(drv_stat)) {
3188 qc->err_mask |= __ac_err_mask(drv_stat);
3189 ap->hsm_task_state = HSM_ST_ERR;
3190 return 0;
3191 }
3192
3193 ap->hsm_task_state = HSM_ST_IDLE;
3194
3195 WARN_ON(qc->err_mask);
3196 ata_poll_qc_complete(qc);
3197
3198 /* another command may start at this point */
3199
3200 return 1;
3201 }
3202
3203
3204 /**
3205 * swap_buf_le16 - swap halves of 16-bit words in place
3206 * @buf: Buffer to swap
3207 * @buf_words: Number of 16-bit words in buffer.
3208 *
3209 * Swap halves of 16-bit words if needed to convert from
3210 * little-endian byte order to native cpu byte order, or
3211 * vice-versa.
3212 *
3213 * LOCKING:
3214 * Inherited from caller.
3215 */
3216 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3217 {
3218 #ifdef __BIG_ENDIAN
3219 unsigned int i;
3220
3221 for (i = 0; i < buf_words; i++)
3222 buf[i] = le16_to_cpu(buf[i]);
3223 #endif /* __BIG_ENDIAN */
3224 }
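/*
 * Example: ATA ID data arrives as little-endian 16-bit words. A
 * word stored on the wire as the byte pair 0x34, 0x12 reads back as
 * 0x3412 on a big-endian CPU; le16_to_cpu() restores the intended
 * 0x1234. On little-endian machines the function body compiles away.
 */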
3225
3226 /**
3227 * ata_mmio_data_xfer - Transfer data by MMIO
3228 * @ap: port to read/write
3229 * @buf: data buffer
3230 * @buflen: buffer length
3231 * @write_data: read/write
3232 *
3233 * Transfer data from/to the device data register by MMIO.
3234 *
3235 * LOCKING:
3236 * Inherited from caller.
3237 */
3238
3239 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3240 unsigned int buflen, int write_data)
3241 {
3242 unsigned int i;
3243 unsigned int words = buflen >> 1;
3244 u16 *buf16 = (u16 *) buf;
3245 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3246
3247 /* Transfer multiple of 2 bytes */
3248 if (write_data) {
3249 for (i = 0; i < words; i++)
3250 writew(le16_to_cpu(buf16[i]), mmio);
3251 } else {
3252 for (i = 0; i < words; i++)
3253 buf16[i] = cpu_to_le16(readw(mmio));
3254 }
3255
3256 /* Transfer trailing 1 byte, if any. */
3257 if (unlikely(buflen & 0x01)) {
3258 u16 align_buf[1] = { 0 };
3259 unsigned char *trailing_buf = buf + buflen - 1;
3260
3261 if (write_data) {
3262 memcpy(align_buf, trailing_buf, 1);
3263 writew(le16_to_cpu(align_buf[0]), mmio);
3264 } else {
3265 align_buf[0] = cpu_to_le16(readw(mmio));
3266 memcpy(trailing_buf, align_buf, 1);
3267 }
3268 }
3269 }
3270
3271 /**
3272 * ata_pio_data_xfer - Transfer data by PIO
3273 * @ap: port to read/write
3274 * @buf: data buffer
3275 * @buflen: buffer length
3276 * @write_data: read/write
3277 *
3278 * Transfer data from/to the device data register by PIO.
3279 *
3280 * LOCKING:
3281 * Inherited from caller.
3282 */
3283
3284 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3285 unsigned int buflen, int write_data)
3286 {
3287 unsigned int words = buflen >> 1;
3288
3289 /* Transfer multiple of 2 bytes */
3290 if (write_data)
3291 outsw(ap->ioaddr.data_addr, buf, words);
3292 else
3293 insw(ap->ioaddr.data_addr, buf, words);
3294
3295 /* Transfer trailing 1 byte, if any. */
3296 if (unlikely(buflen & 0x01)) {
3297 u16 align_buf[1] = { 0 };
3298 unsigned char *trailing_buf = buf + buflen - 1;
3299
3300 if (write_data) {
3301 memcpy(align_buf, trailing_buf, 1);
3302 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3303 } else {
3304 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3305 memcpy(trailing_buf, align_buf, 1);
3306 }
3307 }
3308 }
3309
3310 /**
3311 * ata_data_xfer - Transfer data from/to the data register.
3312 * @ap: port to read/write
3313 * @buf: data buffer
3314 * @buflen: buffer length
3315 * @do_write: read/write
3316 *
3317 * Transfer data from/to the device data register.
3318 *
3319 * LOCKING:
3320 * Inherited from caller.
3321 */
3322
3323 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3324 unsigned int buflen, int do_write)
3325 {
3326 /* Make the crap hardware pay the costs not the good stuff */
3327 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3328 unsigned long flags;
3329 local_irq_save(flags);
3330 if (ap->flags & ATA_FLAG_MMIO)
3331 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3332 else
3333 ata_pio_data_xfer(ap, buf, buflen, do_write);
3334 local_irq_restore(flags);
3335 } else {
3336 if (ap->flags & ATA_FLAG_MMIO)
3337 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3338 else
3339 ata_pio_data_xfer(ap, buf, buflen, do_write);
3340 }
3341 }
3342
3343 /**
3344 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3345 * @qc: Command in progress
3346 *
3347 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3348 *
3349 * LOCKING:
3350 * Inherited from caller.
3351 */
3352
3353 static void ata_pio_sector(struct ata_queued_cmd *qc)
3354 {
3355 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3356 struct scatterlist *sg = qc->__sg;
3357 struct ata_port *ap = qc->ap;
3358 struct page *page;
3359 unsigned int offset;
3360 unsigned char *buf;
3361
3362 if (qc->cursect == (qc->nsect - 1))
3363 ap->hsm_task_state = HSM_ST_LAST;
3364
3365 page = sg[qc->cursg].page;
3366 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3367
3368 /* get the current page and offset */
3369 page = nth_page(page, (offset >> PAGE_SHIFT));
3370 offset %= PAGE_SIZE;
3371
3372 buf = kmap(page) + offset;
3373
3374 qc->cursect++;
3375 qc->cursg_ofs++;
3376
3377 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3378 qc->cursg++;
3379 qc->cursg_ofs = 0;
3380 }
3381
3382 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3383
3384 /* do the actual data transfer */
3385 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3386 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3387
3388 kunmap(page);
3389 }
3390
3391 /**
3392 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3393 * @qc: Command in progress
3394 * @bytes: number of bytes
3395 *
3396 * Transfer data from/to the ATAPI device.
3397 *
3398 * LOCKING:
3399 * Inherited from caller.
3400 *
3401 */
3402
3403 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3404 {
3405 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3406 struct scatterlist *sg = qc->__sg;
3407 struct ata_port *ap = qc->ap;
3408 struct page *page;
3409 unsigned char *buf;
3410 unsigned int offset, count;
3411
3412 if (qc->curbytes + bytes >= qc->nbytes)
3413 ap->hsm_task_state = HSM_ST_LAST;
3414
3415 next_sg:
3416 if (unlikely(qc->cursg >= qc->n_elem)) {
3417 /*
3418 * The end of qc->sg has been reached and the device expects
3419 * more data. To avoid overrunning qc->sg while still
3420 * honoring the length in the byte count register:
3421 * - for reads, discard the trailing data from the device
3422 * - for writes, pad the device with zero data
3423 */
3424 u16 pad_buf[1] = { 0 };
3425 unsigned int words = bytes >> 1;
3426 unsigned int i;
3427
3428 if (words) /* warning if bytes > 1 */
3429 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3430 ap->id, bytes);
3431
3432 for (i = 0; i < words; i++)
3433 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3434
3435 ap->hsm_task_state = HSM_ST_LAST;
3436 return;
3437 }
3438
3439 sg = &qc->__sg[qc->cursg];
3440
3441 page = sg->page;
3442 offset = sg->offset + qc->cursg_ofs;
3443
3444 /* get the current page and offset */
3445 page = nth_page(page, (offset >> PAGE_SHIFT));
3446 offset %= PAGE_SIZE;
3447
3448 /* don't overrun current sg */
3449 count = min(sg->length - qc->cursg_ofs, bytes);
3450
3451 /* don't cross page boundaries */
3452 count = min(count, (unsigned int)PAGE_SIZE - offset);
3453
3454 buf = kmap(page) + offset;
3455
3456 bytes -= count;
3457 qc->curbytes += count;
3458 qc->cursg_ofs += count;
3459
3460 if (qc->cursg_ofs == sg->length) {
3461 qc->cursg++;
3462 qc->cursg_ofs = 0;
3463 }
3464
3465 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3466
3467 /* do the actual data transfer */
3468 ata_data_xfer(ap, buf, count, do_write);
3469
3470 kunmap(page);
3471
3472 if (bytes)
3473 goto next_sg;
3474 }
3475
3476 /**
3477 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3478 * @qc: Command in progress
3479 *
3480 * Transfer data from/to the ATAPI device.
3481 *
3482 * LOCKING:
3483 * Inherited from caller.
3484 */
3485
3486 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3487 {
3488 struct ata_port *ap = qc->ap;
3489 struct ata_device *dev = qc->dev;
3490 unsigned int ireason, bc_lo, bc_hi, bytes;
3491 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3492
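/* The interrupt reason and byte count live in the shadow taskfile:
 * the low two bits of nsect carry CoD and IO, while lbam/lbah hold
 * the byte count the device wants to transfer next (e.g. lbam=0x00,
 * lbah=0x02 means 512 bytes).
 */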
3493 ap->ops->tf_read(ap, &qc->tf);
3494 ireason = qc->tf.nsect;
3495 bc_lo = qc->tf.lbam;
3496 bc_hi = qc->tf.lbah;
3497 bytes = (bc_hi << 8) | bc_lo;
3498
3499 /* shall be cleared to zero, indicating xfer of data */
3500 if (ireason & (1 << 0))
3501 goto err_out;
3502
3503 /* make sure transfer direction matches expected */
3504 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3505 if (do_write != i_write)
3506 goto err_out;
3507
3508 __atapi_pio_bytes(qc, bytes);
3509
3510 return;
3511
3512 err_out:
3513 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3514 ap->id, dev->devno);
3515 qc->err_mask |= AC_ERR_HSM;
3516 ap->hsm_task_state = HSM_ST_ERR;
3517 }
3518
3519 /**
3520 * ata_pio_block - start PIO on a block
3521 * @ap: the target ata_port
3522 *
3523 * LOCKING:
3524 * None. (executing in kernel thread context)
3525 */
3526
3527 static void ata_pio_block(struct ata_port *ap)
3528 {
3529 struct ata_queued_cmd *qc;
3530 u8 status;
3531
3532 /*
3533 * This is purely heuristic. This is a fast path.
3534 * Sometimes when we enter, BSY will be cleared in
3535 * a chk-status or two. If not, the drive is probably seeking
3536 * or something. Snooze for a couple msecs, then
3537 * chk-status again. If still busy, fall back to
3538 * HSM_ST_POLL state.
3539 */
3540 status = ata_busy_wait(ap, ATA_BUSY, 5);
3541 if (status & ATA_BUSY) {
3542 msleep(2);
3543 status = ata_busy_wait(ap, ATA_BUSY, 10);
3544 if (status & ATA_BUSY) {
3545 ap->hsm_task_state = HSM_ST_POLL;
3546 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3547 return;
3548 }
3549 }
3550
3551 qc = ata_qc_from_tag(ap, ap->active_tag);
3552 WARN_ON(qc == NULL);
3553
3554 /* check error */
3555 if (status & (ATA_ERR | ATA_DF)) {
3556 qc->err_mask |= AC_ERR_DEV;
3557 ap->hsm_task_state = HSM_ST_ERR;
3558 return;
3559 }
3560
3561 /* transfer data if any */
3562 if (is_atapi_taskfile(&qc->tf)) {
3563 /* DRQ=0 means no more data to transfer */
3564 if ((status & ATA_DRQ) == 0) {
3565 ap->hsm_task_state = HSM_ST_LAST;
3566 return;
3567 }
3568
3569 atapi_pio_bytes(qc);
3570 } else {
3571 /* handle BSY=0, DRQ=0 as error */
3572 if ((status & ATA_DRQ) == 0) {
3573 qc->err_mask |= AC_ERR_HSM;
3574 ap->hsm_task_state = HSM_ST_ERR;
3575 return;
3576 }
3577
3578 ata_pio_sector(qc);
3579 }
3580 }
3581
3582 static void ata_pio_error(struct ata_port *ap)
3583 {
3584 struct ata_queued_cmd *qc;
3585
3586 qc = ata_qc_from_tag(ap, ap->active_tag);
3587 WARN_ON(qc == NULL);
3588
3589 if (qc->tf.command != ATA_CMD_PACKET)
3590 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3591
3592 /* make sure qc->err_mask is available to
3593 * know what's wrong and recover
3594 */
3595 WARN_ON(qc->err_mask == 0);
3596
3597 ap->hsm_task_state = HSM_ST_IDLE;
3598
3599 ata_poll_qc_complete(qc);
3600 }
3601
3602 static void ata_pio_task(void *_data)
3603 {
3604 struct ata_port *ap = _data;
3605 unsigned long timeout;
3606 int qc_completed;
3607
3608 fsm_start:
3609 timeout = 0;
3610 qc_completed = 0;
3611
3612 switch (ap->hsm_task_state) {
3613 case HSM_ST_IDLE:
3614 return;
3615
3616 case HSM_ST:
3617 ata_pio_block(ap);
3618 break;
3619
3620 case HSM_ST_LAST:
3621 qc_completed = ata_pio_complete(ap);
3622 break;
3623
3624 case HSM_ST_POLL:
3625 case HSM_ST_LAST_POLL:
3626 timeout = ata_pio_poll(ap);
3627 break;
3628
3629 case HSM_ST_TMOUT:
3630 case HSM_ST_ERR:
3631 ata_pio_error(ap);
3632 return;
3633 }
3634
3635 if (timeout)
3636 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3637 else if (!qc_completed)
3638 goto fsm_start;
3639 }
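/*
 * Summary of the polling state machine driven above:
 *
 *	HSM_ST        - transfer one data block via ata_pio_block()
 *	HSM_ST_LAST   - wait for the final BSY->idle transition
 *	HSM_ST_POLL,
 *	HSM_ST_LAST_POLL - BSY was still set: requeue this task with
 *	                a short pause and recheck in ata_pio_poll()
 *	HSM_ST_TMOUT,
 *	HSM_ST_ERR    - fail the qc through ata_pio_error()
 *	HSM_ST_IDLE   - nothing in flight; the task exits
 */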
3640
3641 /**
3642 * atapi_packet_task - Write CDB bytes to hardware
3643 * @_data: Port to which ATAPI device is attached.
3644 *
3645 * When the device has indicated its readiness to accept
3646 * a CDB, this function is called. Send the CDB.
3647 * If DMA is to be performed, exit immediately.
3648 * Otherwise, we are in polling mode, so poll
3649 * status until the operation succeeds or fails.
3650 *
3651 * LOCKING:
3652 * Kernel thread context (may sleep)
3653 */
3654
3655 static void atapi_packet_task(void *_data)
3656 {
3657 struct ata_port *ap = _data;
3658 struct ata_queued_cmd *qc;
3659 u8 status;
3660
3661 qc = ata_qc_from_tag(ap, ap->active_tag);
3662 WARN_ON(qc == NULL);
3663 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3664
3665 /* sleep-wait for BSY to clear */
3666 DPRINTK("busy wait\n");
3667 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3668 qc->err_mask |= AC_ERR_TIMEOUT;
3669 goto err_out;
3670 }
3671
3672 /* make sure DRQ is set */
3673 status = ata_chk_status(ap);
3674 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3675 qc->err_mask |= AC_ERR_HSM;
3676 goto err_out;
3677 }
3678
3679 /* send SCSI cdb */
3680 DPRINTK("send cdb\n");
3681 WARN_ON(qc->dev->cdb_len < 12);
3682
3683 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3684 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3685 unsigned long flags;
3686
3687 /* Once we're done issuing command and kicking bmdma,
3688 * irq handler takes over. To not lose irq, we need
3689 * to clear NOINTR flag before sending cdb, but
3690 * interrupt handler shouldn't be invoked before we're
3691 * finished. Hence, the following locking.
3692 */
3693 spin_lock_irqsave(&ap->host_set->lock, flags);
3694 ap->flags &= ~ATA_FLAG_NOINTR;
3695 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3696 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3697 ap->ops->bmdma_start(qc); /* initiate bmdma */
3698 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3699 } else {
3700 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3701
3702 /* PIO commands are handled by polling */
3703 ap->hsm_task_state = HSM_ST;
3704 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3705 }
3706
3707 return;
3708
3709 err_out:
3710 ata_poll_qc_complete(qc);
3711 }
3712
3713 /**
3714 * ata_qc_timeout - Handle timeout of queued command
3715 * @qc: Command that timed out
3716 *
3717 * Some part of the kernel (currently, only the SCSI layer)
3718 * has noticed that the active command on port @ap has not
3719 * completed after a specified length of time. Handle this
3720 * condition by disabling DMA (if necessary) and completing
3721 * transactions, with error if necessary.
3722 *
3723 * This also handles the case of the "lost interrupt", where
3724 * for some reason (possibly hardware bug, possibly driver bug)
3725 * an interrupt was not delivered to the driver, even though the
3726 * transaction completed successfully.
3727 *
3728 * LOCKING:
3729 * Inherited from SCSI layer (none, can sleep)
3730 */
3731
3732 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3733 {
3734 struct ata_port *ap = qc->ap;
3735 struct ata_host_set *host_set = ap->host_set;
3736 u8 host_stat = 0, drv_stat;
3737 unsigned long flags;
3738
3739 DPRINTK("ENTER\n");
3740
3741 ap->hsm_task_state = HSM_ST_IDLE;
3742
3743 spin_lock_irqsave(&host_set->lock, flags);
3744
3745 switch (qc->tf.protocol) {
3746
3747 case ATA_PROT_DMA:
3748 case ATA_PROT_ATAPI_DMA:
3749 host_stat = ap->ops->bmdma_status(ap);
3750
3751 /* before we do anything else, clear DMA-Start bit */
3752 ap->ops->bmdma_stop(qc);
3753
3754 /* fall through */
3755
3756 default:
3757 ata_altstatus(ap);
3758 drv_stat = ata_chk_status(ap);
3759
3760 /* ack bmdma irq events */
3761 ap->ops->irq_clear(ap);
3762
3763 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3764 ap->id, qc->tf.command, drv_stat, host_stat);
3765
3766 /* complete taskfile transaction */
3767 qc->err_mask |= ac_err_mask(drv_stat);
3768 break;
3769 }
3770
3771 spin_unlock_irqrestore(&host_set->lock, flags);
3772
3773 ata_eh_qc_complete(qc);
3774
3775 DPRINTK("EXIT\n");
3776 }
3777
3778 /**
3779 * ata_eng_timeout - Handle timeout of queued command
3780 * @ap: Port on which timed-out command is active
3781 *
3782 * Some part of the kernel (currently, only the SCSI layer)
3783 * has noticed that the active command on port @ap has not
3784 * completed after a specified length of time. Handle this
3785 * condition by disabling DMA (if necessary) and completing
3786 * transactions, with error if necessary.
3787 *
3788 * This also handles the case of the "lost interrupt", where
3789 * for some reason (possibly hardware bug, possibly driver bug)
3790 * an interrupt was not delivered to the driver, even though the
3791 * transaction completed successfully.
3792 *
3793 * LOCKING:
3794 * Inherited from SCSI layer (none, can sleep)
3795 */
3796
3797 void ata_eng_timeout(struct ata_port *ap)
3798 {
3799 DPRINTK("ENTER\n");
3800
3801 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3802
3803 DPRINTK("EXIT\n");
3804 }
3805
3806 /**
3807 * ata_qc_new - Request an available ATA command, for queueing
3808 * @ap: Port from which to request an available command structure
3810 *
3811 * LOCKING:
3812 * None.
3813 */
3814
3815 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3816 {
3817 struct ata_queued_cmd *qc = NULL;
3818 unsigned int i;
3819
3820 for (i = 0; i < ATA_MAX_QUEUE; i++)
3821 if (!test_and_set_bit(i, &ap->qactive)) {
3822 qc = ata_qc_from_tag(ap, i);
3823 break;
3824 }
3825
3826 if (qc)
3827 qc->tag = i;
3828
3829 return qc;
3830 }
3831
3832 /**
3833 * ata_qc_new_init - Request an available ATA command, and initialize it
3834 * @ap: Port associated with device @dev
3835 * @dev: Device from which we request an available command structure
3836 *
3837 * LOCKING:
3838 * None.
3839 */
3840
3841 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3842 struct ata_device *dev)
3843 {
3844 struct ata_queued_cmd *qc;
3845
3846 qc = ata_qc_new(ap);
3847 if (qc) {
3848 qc->scsicmd = NULL;
3849 qc->ap = ap;
3850 qc->dev = dev;
3851
3852 ata_qc_reinit(qc);
3853 }
3854
3855 return qc;
3856 }
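/*
 * Life cycle sketch (caller holds the host_set lock; "my_done" is a
 * hypothetical completion callback, not part of libata):
 *
 *	struct ata_queued_cmd *qc = ata_qc_new_init(ap, dev);
 *	if (qc) {
 *		qc->complete_fn = my_done;
 *		... fill qc->tf and set up any data transfer ...
 *		qc->err_mask = ata_qc_issue(qc);
 *	}
 *
 * On completion __ata_qc_complete() invokes my_done(); the owner
 * eventually calls ata_qc_free() to return the tag to ap->qactive.
 */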
3857
3858 /**
3859 * ata_qc_free - free unused ata_queued_cmd
3860 * @qc: Command to complete
3861 *
3862 * Designed to free unused ata_queued_cmd object
3863 * in case something prevents using it.
3864 *
3865 * LOCKING:
3866 * spin_lock_irqsave(host_set lock)
3867 */
3868 void ata_qc_free(struct ata_queued_cmd *qc)
3869 {
3870 struct ata_port *ap = qc->ap;
3871 unsigned int tag;
3872
3873 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3874
3875 qc->flags = 0;
3876 tag = qc->tag;
3877 if (likely(ata_tag_valid(tag))) {
3878 if (tag == ap->active_tag)
3879 ap->active_tag = ATA_TAG_POISON;
3880 qc->tag = ATA_TAG_POISON;
3881 clear_bit(tag, &ap->qactive);
3882 }
3883 }
3884
3885 void __ata_qc_complete(struct ata_queued_cmd *qc)
3886 {
3887 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3888 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3889
3890 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3891 ata_sg_clean(qc);
3892
3893 /* atapi: mark qc as inactive to prevent the interrupt handler
3894 * from completing the command twice later, before the error handler
3895 * is called. (when rc != 0 and atapi request sense is needed)
3896 */
3897 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3898
3899 /* call completion callback */
3900 qc->complete_fn(qc);
3901 }
3902
3903 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3904 {
3905 struct ata_port *ap = qc->ap;
3906
3907 switch (qc->tf.protocol) {
3908 case ATA_PROT_DMA:
3909 case ATA_PROT_ATAPI_DMA:
3910 return 1;
3911
3912 case ATA_PROT_ATAPI:
3913 case ATA_PROT_PIO:
3914 if (ap->flags & ATA_FLAG_PIO_DMA)
3915 return 1;
3916
3917 /* fall through */
3918
3919 default:
3920 return 0;
3921 }
3922
3923 /* never reached */
3924 }
3925
3926 /**
3927 * ata_qc_issue - issue taskfile to device
3928 * @qc: command to issue to device
3929 *
3930 * Prepare an ATA command for submission to a device.
3931 * This includes mapping the data into a DMA-able
3932 * area, filling in the S/G table, and finally
3933 * writing the taskfile to hardware, starting the command.
3934 *
3935 * LOCKING:
3936 * spin_lock_irqsave(host_set lock)
3937 *
3938 * RETURNS:
3939 * Zero on success, AC_ERR_* mask on failure
3940 */
3941
3942 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3943 {
3944 struct ata_port *ap = qc->ap;
3945
3946 if (ata_should_dma_map(qc)) {
3947 if (qc->flags & ATA_QCFLAG_SG) {
3948 if (ata_sg_setup(qc))
3949 goto sg_err;
3950 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3951 if (ata_sg_setup_one(qc))
3952 goto sg_err;
3953 }
3954 } else {
3955 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3956 }
3957
3958 ap->ops->qc_prep(qc);
3959
3960 qc->ap->active_tag = qc->tag;
3961 qc->flags |= ATA_QCFLAG_ACTIVE;
3962
3963 return ap->ops->qc_issue(qc);
3964
3965 sg_err:
3966 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3967 return AC_ERR_SYSTEM;
3968 }
3969
3970
3971 /**
3972 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
3973 * @qc: command to issue to device
3974 *
3975 * Using various libata functions and hooks, this function
3976 * starts an ATA command. ATA commands are grouped into
3977 * classes called "protocols", and issuing each type of protocol
3978 * is slightly different.
3979 *
3980 * May be used as the qc_issue() entry in ata_port_operations.
3981 *
3982 * LOCKING:
3983 * spin_lock_irqsave(host_set lock)
3984 *
3985 * RETURNS:
3986 * Zero on success, AC_ERR_* mask on failure
3987 */
3988
3989 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3990 {
3991 struct ata_port *ap = qc->ap;
3992
3993 ata_dev_select(ap, qc->dev->devno, 1, 0);
3994
3995 switch (qc->tf.protocol) {
3996 case ATA_PROT_NODATA:
3997 ata_tf_to_host(ap, &qc->tf);
3998 break;
3999
4000 case ATA_PROT_DMA:
4001 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4002 ap->ops->bmdma_setup(qc); /* set up bmdma */
4003 ap->ops->bmdma_start(qc); /* initiate bmdma */
4004 break;
4005
4006 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4007 ata_qc_set_polling(qc);
4008 ata_tf_to_host(ap, &qc->tf);
4009 ap->hsm_task_state = HSM_ST;
4010 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4011 break;
4012
4013 case ATA_PROT_ATAPI:
4014 ata_qc_set_polling(qc);
4015 ata_tf_to_host(ap, &qc->tf);
4016 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4017 break;
4018
4019 case ATA_PROT_ATAPI_NODATA:
4020 ap->flags |= ATA_FLAG_NOINTR;
4021 ata_tf_to_host(ap, &qc->tf);
4022 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4023 break;
4024
4025 case ATA_PROT_ATAPI_DMA:
4026 ap->flags |= ATA_FLAG_NOINTR;
4027 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4028 ap->ops->bmdma_setup(qc); /* set up bmdma */
4029 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4030 break;
4031
4032 default:
4033 WARN_ON(1);
4034 return AC_ERR_SYSTEM;
4035 }
4036
4037 return 0;
4038 }
4039
4040 /**
4041 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
4042 * @qc: Info associated with this ATA transaction.
4043 *
4044 * LOCKING:
4045 * spin_lock_irqsave(host_set lock)
4046 */
4047
4048 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
4049 {
4050 struct ata_port *ap = qc->ap;
4051 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
4052 u8 dmactl;
4053 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4054
4055 /* load PRD table addr. */
4056 mb(); /* make sure PRD table writes are visible to controller */
4057 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
4058
4059 /* specify data direction, triple-check start bit is clear */
4060 dmactl = readb(mmio + ATA_DMA_CMD);
4061 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
4062 if (!rw)
4063 dmactl |= ATA_DMA_WR;
4064 writeb(dmactl, mmio + ATA_DMA_CMD);
4065
4066 /* issue r/w command */
4067 ap->ops->exec_command(ap, &qc->tf);
4068 }
4069
4070 /**
4071 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
4072 * @qc: Info associated with this ATA transaction.
4073 *
4074 * LOCKING:
4075 * spin_lock_irqsave(host_set lock)
4076 */
4077
4078 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
4079 {
4080 struct ata_port *ap = qc->ap;
4081 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4082 u8 dmactl;
4083
4084 /* start host DMA transaction */
4085 dmactl = readb(mmio + ATA_DMA_CMD);
4086 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
4087
4088 /* Strictly, one may wish to issue a readb() here, to
4089 * flush the mmio write. However, control also passes
4090 * to the hardware at this point, and it will interrupt
4091 * us when we are to resume control. So, in effect,
4092 * we don't care when the mmio write flushes.
4093 * Further, a read of the DMA status register _immediately_
4094 * following the write may not be what certain flaky hardware
4095 * expects, so I think it is best not to add a readb()
4096 * without first testing all the MMIO ATA cards/mobos.
4097 * Or maybe I'm just being paranoid.
4098 */
4099 }
4100
4101 /**
4102 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
4103 * @qc: Info associated with this ATA transaction.
4104 *
4105 * LOCKING:
4106 * spin_lock_irqsave(host_set lock)
4107 */
4108
4109 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
4110 {
4111 struct ata_port *ap = qc->ap;
4112 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
4113 u8 dmactl;
4114
4115 /* load PRD table addr. */
4116 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
4117
4118 /* specify data direction, triple-check start bit is clear */
4119 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4120 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
4121 if (!rw)
4122 dmactl |= ATA_DMA_WR;
4123 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4124
4125 /* issue r/w command */
4126 ap->ops->exec_command(ap, &qc->tf);
4127 }
4128
4129 /**
4130 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
4131 * @qc: Info associated with this ATA transaction.
4132 *
4133 * LOCKING:
4134 * spin_lock_irqsave(host_set lock)
4135 */
4136
4137 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
4138 {
4139 struct ata_port *ap = qc->ap;
4140 u8 dmactl;
4141
4142 /* start host DMA transaction */
4143 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4144 outb(dmactl | ATA_DMA_START,
4145 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4146 }
4147
4148
4149 /**
4150 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
4151 * @qc: Info associated with this ATA transaction.
4152 *
4153 * Writes the ATA_DMA_START flag to the DMA command register.
4154 *
4155 * May be used as the bmdma_start() entry in ata_port_operations.
4156 *
4157 * LOCKING:
4158 * spin_lock_irqsave(host_set lock)
4159 */
4160 void ata_bmdma_start(struct ata_queued_cmd *qc)
4161 {
4162 if (qc->ap->flags & ATA_FLAG_MMIO)
4163 ata_bmdma_start_mmio(qc);
4164 else
4165 ata_bmdma_start_pio(qc);
4166 }
4167
4168
4169 /**
4170 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
4171 * @qc: Info associated with this ATA transaction.
4172 *
4173 * Writes address of PRD table to device's PRD Table Address
4174 * register, sets the DMA control register, and calls
4175 * ops->exec_command() to start the transfer.
4176 *
4177 * May be used as the bmdma_setup() entry in ata_port_operations.
4178 *
4179 * LOCKING:
4180 * spin_lock_irqsave(host_set lock)
4181 */
4182 void ata_bmdma_setup(struct ata_queued_cmd *qc)
4183 {
4184 if (qc->ap->flags & ATA_FLAG_MMIO)
4185 ata_bmdma_setup_mmio(qc);
4186 else
4187 ata_bmdma_setup_pio(qc);
4188 }
4189
4190
4191 /**
4192 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
4193 * @ap: Port associated with this ATA transaction.
4194 *
4195 * Clear interrupt and error flags in DMA status register.
4196 *
4197 * May be used as the irq_clear() entry in ata_port_operations.
4198 *
4199 * LOCKING:
4200 * spin_lock_irqsave(host_set lock)
4201 */
4202
4203 void ata_bmdma_irq_clear(struct ata_port *ap)
4204 {
4205 if (ap->flags & ATA_FLAG_MMIO) {
4206 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
4207 writeb(readb(mmio), mmio);
4208 } else {
4209 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
4210 outb(inb(addr), addr);
4211 }
4212
4213 }
4214
4215
4216 /**
4217 * ata_bmdma_status - Read PCI IDE BMDMA status
4218 * @ap: Port associated with this ATA transaction.
4219 *
4220 * Read and return BMDMA status register.
4221 *
4222 * May be used as the bmdma_status() entry in ata_port_operations.
4223 *
4224 * LOCKING:
4225 * spin_lock_irqsave(host_set lock)
4226 */
4227
4228 u8 ata_bmdma_status(struct ata_port *ap)
4229 {
4230 u8 host_stat;
4231 if (ap->flags & ATA_FLAG_MMIO) {
4232 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4233 host_stat = readb(mmio + ATA_DMA_STATUS);
4234 } else
4235 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
4236 return host_stat;
4237 }
4238
4239
4240 /**
4241 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
4242 * @qc: Command we are ending DMA for
4243 *
4244 * Clears the ATA_DMA_START flag in the dma control register
4245 *
4246 * May be used as the bmdma_stop() entry in ata_port_operations.
4247 *
4248 * LOCKING:
4249 * spin_lock_irqsave(host_set lock)
4250 */
4251
4252 void ata_bmdma_stop(struct ata_queued_cmd *qc)
4253 {
4254 struct ata_port *ap = qc->ap;
4255 if (ap->flags & ATA_FLAG_MMIO) {
4256 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4257
4258 /* clear start/stop bit */
4259 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
4260 mmio + ATA_DMA_CMD);
4261 } else {
4262 /* clear start/stop bit */
4263 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
4264 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4265 }
4266
4267 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
4268 ata_altstatus(ap); /* dummy read */
4269 }
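
/*
 * Taken together, ata_bmdma_setup(), ata_bmdma_start(), ata_bmdma_stop(),
 * ata_bmdma_status() and ata_bmdma_irq_clear() cover the whole BMDMA side
 * of a conventional PCI IDE driver.  An illustrative sketch of the
 * corresponding hooks (hypothetical "foo" driver; ata_interrupt is
 * defined below):
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		...
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		...
 *	};
 */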
4270
4271 /**
4272 * ata_host_intr - Handle host interrupt for given (port, task)
4273 * @ap: Port on which interrupt arrived (possibly...)
4274 * @qc: Taskfile currently active in engine
4275 *
4276 * Handle host interrupt for given queued command. Currently,
4277 * only DMA interrupts are handled. All other commands are
4278 * handled via polling with interrupts disabled (nIEN bit).
4279 *
4280 * LOCKING:
4281 * spin_lock_irqsave(host_set lock)
4282 *
4283 * RETURNS:
4284 * One if interrupt was handled, zero if not (shared irq).
4285 */
4286
4287 inline unsigned int ata_host_intr (struct ata_port *ap,
4288 struct ata_queued_cmd *qc)
4289 {
4290 u8 status, host_stat;
4291
4292 switch (qc->tf.protocol) {
4293
4294 case ATA_PROT_DMA:
4295 case ATA_PROT_ATAPI_DMA:
4296 case ATA_PROT_ATAPI:
4297 /* check status of DMA engine */
4298 host_stat = ap->ops->bmdma_status(ap);
4299 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4300
4301 /* if it's not our irq... */
4302 if (!(host_stat & ATA_DMA_INTR))
4303 goto idle_irq;
4304
4305 /* before we do anything else, clear DMA-Start bit */
4306 ap->ops->bmdma_stop(qc);
4307
4308 /* fall through */
4309
4310 case ATA_PROT_ATAPI_NODATA:
4311 case ATA_PROT_NODATA:
4312 /* check altstatus */
4313 status = ata_altstatus(ap);
4314 if (status & ATA_BUSY)
4315 goto idle_irq;
4316
4317 /* check main status, clearing INTRQ */
4318 status = ata_chk_status(ap);
4319 if (unlikely(status & ATA_BUSY))
4320 goto idle_irq;
4321 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4322 ap->id, qc->tf.protocol, status);
4323
4324 /* ack bmdma irq events */
4325 ap->ops->irq_clear(ap);
4326
4327 /* complete taskfile transaction */
4328 qc->err_mask |= ac_err_mask(status);
4329 ata_qc_complete(qc);
4330 break;
4331
4332 default:
4333 goto idle_irq;
4334 }
4335
4336 return 1; /* irq handled */
4337
4338 idle_irq:
4339 ap->stats.idle_irq++;
4340
4341 #ifdef ATA_IRQ_TRAP
4342 if ((ap->stats.idle_irq % 1000) == 0) {
4343 ata_irq_ack(ap, 0); /* debug trap */
4344 printk(KERN_WARNING "ata%u: irq trap\n", ap->id);
4345 return 1;
4346 }
4347 #endif
4348 return 0; /* irq not handled */
4349 }
4350
4351 /**
4352 * ata_interrupt - Default ATA host interrupt handler
4353 * @irq: irq line (unused)
4354 * @dev_instance: pointer to our ata_host_set information structure
4355 * @regs: unused
4356 *
4357 * Default interrupt handler for PCI IDE devices. Calls
4358 * ata_host_intr() for each port that is not disabled.
4359 *
4360 * LOCKING:
4361 * Obtains host_set lock during operation.
4362 *
4363 * RETURNS:
4364 * IRQ_NONE or IRQ_HANDLED.
4365 */
4366
4367 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4368 {
4369 struct ata_host_set *host_set = dev_instance;
4370 unsigned int i;
4371 unsigned int handled = 0;
4372 unsigned long flags;
4373
4374 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4375 spin_lock_irqsave(&host_set->lock, flags);
4376
4377 for (i = 0; i < host_set->n_ports; i++) {
4378 struct ata_port *ap;
4379
4380 ap = host_set->ports[i];
4381 if (ap &&
4382 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4383 struct ata_queued_cmd *qc;
4384
4385 qc = ata_qc_from_tag(ap, ap->active_tag);
4386 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4387 (qc->flags & ATA_QCFLAG_ACTIVE))
4388 handled |= ata_host_intr(ap, qc);
4389 }
4390 }
4391
4392 spin_unlock_irqrestore(&host_set->lock, flags);
4393
4394 return IRQ_RETVAL(handled);
4395 }
4396
4397
4398 /*
4399 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4400 * without filling any other registers
4401 */
4402 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4403 u8 cmd)
4404 {
4405 struct ata_taskfile tf;
4406 int err;
4407
4408 ata_tf_init(ap, &tf, dev->devno);
4409
4410 tf.command = cmd;
4411 tf.flags |= ATA_TFLAG_DEVICE;
4412 tf.protocol = ATA_PROT_NODATA;
4413
4414 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4415 if (err)
4416 printk(KERN_ERR "%s: ata command failed: %d\n",
4417 __FUNCTION__, err);
4418
4419 return err;
4420 }
4421
4422 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4423 {
4424 u8 cmd;
4425
4426 if (!ata_try_flush_cache(dev))
4427 return 0;
4428
4429 if (ata_id_has_flush_ext(dev->id))
4430 cmd = ATA_CMD_FLUSH_EXT;
4431 else
4432 cmd = ATA_CMD_FLUSH;
4433
4434 return ata_do_simple_cmd(ap, dev, cmd);
4435 }
4436
4437 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4438 {
4439 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4440 }
4441
4442 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4443 {
4444 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4445 }
4446
4447 /**
4448 * ata_device_resume - wake up a previously suspended device
4449 * @ap: port the device is connected to
4450 * @dev: the device to resume
4451 *
4452 * Kick the drive back into action by sending it an idle immediate
4453 * command and making sure its transfer mode matches between drive
4454 * and host.
4455 *
4456 */
4457 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4458 {
4459 if (ap->flags & ATA_FLAG_SUSPENDED) {
4460 ap->flags &= ~ATA_FLAG_SUSPENDED;
4461 ata_set_mode(ap);
4462 }
4463 if (!ata_dev_present(dev))
4464 return 0;
4465 if (dev->class == ATA_DEV_ATA)
4466 ata_start_drive(ap, dev);
4467
4468 return 0;
4469 }
4470
4471 /**
4472 * ata_device_suspend - prepare a device for suspend
4473 * @ap: port the device is connected to
4474 * @dev: the device to suspend
4475 *
4476 * Flush the cache on the drive, if appropriate, then issue a
4477 * standbynow command.
4478 */
4479 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4480 {
4481 if (!ata_dev_present(dev))
4482 return 0;
4483 if (dev->class == ATA_DEV_ATA)
4484 ata_flush_cache(ap, dev);
4485
4486 ata_standby_drive(ap, dev);
4487 ap->flags |= ATA_FLAG_SUSPENDED;
4488 return 0;
4489 }
4490
4491 /**
4492 * ata_port_start - Set port up for dma.
4493 * @ap: Port to initialize
4494 *
4495 * Called just after data structures for each port are
4496 * initialized. Allocates space for PRD table.
4497 *
4498 * May be used as the port_start() entry in ata_port_operations.
4499 *
4500 * LOCKING:
4501 * Inherited from caller.
4502 */
4503
4504 int ata_port_start (struct ata_port *ap)
4505 {
4506 struct device *dev = ap->host_set->dev;
4507 int rc;
4508
4509 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4510 if (!ap->prd)
4511 return -ENOMEM;
4512
4513 rc = ata_pad_alloc(ap, dev);
4514 if (rc) {
4515 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4516 return rc;
4517 }
4518
4519 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4520
4521 return 0;
4522 }
4523
4524
4525 /**
4526 * ata_port_stop - Undo ata_port_start()
4527 * @ap: Port to shut down
4528 *
4529 * Frees the PRD table.
4530 *
4531 * May be used as the port_stop() entry in ata_port_operations.
4532 *
4533 * LOCKING:
4534 * Inherited from caller.
4535 */
4536
4537 void ata_port_stop (struct ata_port *ap)
4538 {
4539 struct device *dev = ap->host_set->dev;
4540
4541 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4542 ata_pad_free(ap, dev);
4543 }
4544
4545 void ata_host_stop (struct ata_host_set *host_set)
4546 {
4547 if (host_set->mmio_base)
4548 iounmap(host_set->mmio_base);
4549 }
4550
4551
4552 /**
4553 * ata_host_remove - Unregister SCSI host structure with upper layers
4554 * @ap: Port to unregister
4555 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4556 *
4557 * LOCKING:
4558 * Inherited from caller.
4559 */
4560
4561 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4562 {
4563 struct Scsi_Host *sh = ap->host;
4564
4565 DPRINTK("ENTER\n");
4566
4567 if (do_unregister)
4568 scsi_remove_host(sh);
4569
4570 ap->ops->port_stop(ap);
4571 }
4572
4573 /**
4574 * ata_host_init - Initialize an ata_port structure
4575 * @ap: Structure to initialize
4576 * @host: associated SCSI mid-layer structure
4577 * @host_set: Collection of hosts to which @ap belongs
4578 * @ent: Probe information provided by low-level driver
4579 * @port_no: Port number associated with this ata_port
4580 *
4581 * Initialize a new ata_port structure, and its associated
4582 * scsi_host.
4583 *
4584 * LOCKING:
4585 * Inherited from caller.
4586 */
4587
4588 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4589 struct ata_host_set *host_set,
4590 const struct ata_probe_ent *ent, unsigned int port_no)
4591 {
4592 unsigned int i;
4593
4594 host->max_id = 16;
4595 host->max_lun = 1;
4596 host->max_channel = 1;
4597 host->unique_id = ata_unique_id++;
4598 host->max_cmd_len = 12;
4599
4600 ap->flags = ATA_FLAG_PORT_DISABLED;
4601 ap->id = host->unique_id;
4602 ap->host = host;
4603 ap->ctl = ATA_DEVCTL_OBS;
4604 ap->host_set = host_set;
4605 ap->port_no = port_no;
4606 ap->hard_port_no =
4607 ent->legacy_mode ? ent->hard_port_no : port_no;
4608 ap->pio_mask = ent->pio_mask;
4609 ap->mwdma_mask = ent->mwdma_mask;
4610 ap->udma_mask = ent->udma_mask;
4611 ap->flags |= ent->host_flags;
4612 ap->ops = ent->port_ops;
4613 ap->cbl = ATA_CBL_NONE;
4614 ap->active_tag = ATA_TAG_POISON;
4615 ap->last_ctl = 0xFF;
4616
4617 INIT_WORK(&ap->port_task, NULL, NULL);
4618 INIT_LIST_HEAD(&ap->eh_done_q);
4619
4620 for (i = 0; i < ATA_MAX_DEVICES; i++)
4621 ap->device[i].devno = i;
4622
4623 #ifdef ATA_IRQ_TRAP
4624 ap->stats.unhandled_irq = 1;
4625 ap->stats.idle_irq = 1;
4626 #endif
4627
4628 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4629 }
4630
4631 /**
4632 * ata_host_add - Attach low-level ATA driver to system
4633 * @ent: Information provided by low-level driver
4634 * @host_set: Collections of ports to which we add
4635 * @port_no: Port number associated with this host
4636 *
4637 * Attach low-level ATA driver to system.
4638 *
4639 * LOCKING:
4640 * PCI/etc. bus probe sem.
4641 *
4642 * RETURNS:
4643 * New ata_port on success, NULL on error.
4644 */
4645
4646 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4647 struct ata_host_set *host_set,
4648 unsigned int port_no)
4649 {
4650 struct Scsi_Host *host;
4651 struct ata_port *ap;
4652 int rc;
4653
4654 DPRINTK("ENTER\n");
4655 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4656 if (!host)
4657 return NULL;
4658
4659 ap = (struct ata_port *) &host->hostdata[0];
4660
4661 ata_host_init(ap, host, host_set, ent, port_no);
4662
4663 rc = ap->ops->port_start(ap);
4664 if (rc)
4665 goto err_out;
4666
4667 return ap;
4668
4669 err_out:
4670 scsi_host_put(host);
4671 return NULL;
4672 }
4673
4674 /**
4675 * ata_device_add - Register hardware device with ATA and SCSI layers
4676 * @ent: Probe information describing hardware device to be registered
4677 *
4678 * This function processes the information provided in the probe
4679 * information struct @ent, allocates the necessary ATA and SCSI
4680 * host information structures, initializes them, and registers
4681 * everything with requisite kernel subsystems.
4682 *
4683 * This function requests irqs, probes the ATA bus, and probes
4684 * the SCSI bus.
4685 *
4686 * LOCKING:
4687 * PCI/etc. bus probe sem.
4688 *
4689 * RETURNS:
4690 * Number of ports registered. Zero on error (no ports registered).
4691 */
4692
4693 int ata_device_add(const struct ata_probe_ent *ent)
4694 {
4695 unsigned int count = 0, i;
4696 struct device *dev = ent->dev;
4697 struct ata_host_set *host_set;
4698
4699 DPRINTK("ENTER\n");
4700 /* alloc a container for our list of ATA ports (buses) */
4701 host_set = kzalloc(sizeof(struct ata_host_set) +
4702 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4703 if (!host_set)
4704 return 0;
4705 spin_lock_init(&host_set->lock);
4706
4707 host_set->dev = dev;
4708 host_set->n_ports = ent->n_ports;
4709 host_set->irq = ent->irq;
4710 host_set->mmio_base = ent->mmio_base;
4711 host_set->private_data = ent->private_data;
4712 host_set->ops = ent->port_ops;
4713
4714 /* register each port bound to this device */
4715 for (i = 0; i < ent->n_ports; i++) {
4716 struct ata_port *ap;
4717 unsigned long xfer_mode_mask;
4718
4719 ap = ata_host_add(ent, host_set, i);
4720 if (!ap)
4721 goto err_out;
4722
4723 host_set->ports[i] = ap;
4724 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4725 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4726 (ap->pio_mask << ATA_SHIFT_PIO);
4727
4728 /* print per-port info to dmesg */
4729 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4730 "bmdma 0x%lX irq %lu\n",
4731 ap->id,
4732 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4733 ata_mode_string(xfer_mode_mask),
4734 ap->ioaddr.cmd_addr,
4735 ap->ioaddr.ctl_addr,
4736 ap->ioaddr.bmdma_addr,
4737 ent->irq);
4738
4739 ata_chk_status(ap);
4740 host_set->ops->irq_clear(ap);
4741 count++;
4742 }
4743
4744 if (!count)
4745 goto err_free_ret;
4746
4747 /* obtain the irq, which is shared between channels */
4748 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4749 DRV_NAME, host_set))
4750 goto err_out;
4751
4752 /* perform each probe synchronously */
4753 DPRINTK("probe begin\n");
4754 for (i = 0; i < count; i++) {
4755 struct ata_port *ap;
4756 int rc;
4757
4758 ap = host_set->ports[i];
4759
4760 DPRINTK("ata%u: bus probe begin\n", ap->id);
4761 rc = ata_bus_probe(ap);
4762 DPRINTK("ata%u: bus probe end\n", ap->id);
4763
4764 if (rc) {
4765 /* FIXME: do something useful here?
4766 * Current libata behavior will
4767 * tear down everything when
4768 * the module is removed
4769 * or the h/w is unplugged.
4770 */
4771 }
4772
4773 rc = scsi_add_host(ap->host, dev);
4774 if (rc) {
4775 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4776 ap->id);
4777 /* FIXME: do something useful here */
4778 /* FIXME: handle unconditional calls to
4779 * scsi_scan_host and ata_host_remove, below,
4780 * at the very least
4781 */
4782 }
4783 }
4784
4785 /* probes are done, now scan each port's disk(s) */
4786 DPRINTK("host probe begin\n");
4787 for (i = 0; i < count; i++) {
4788 struct ata_port *ap = host_set->ports[i];
4789
4790 ata_scsi_scan_host(ap);
4791 }
4792
4793 dev_set_drvdata(dev, host_set);
4794
4795 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4796 return ent->n_ports; /* success */
4797
4798 err_out:
4799 for (i = 0; i < count; i++) {
4800 ata_host_remove(host_set->ports[i], 1);
4801 scsi_host_put(host_set->ports[i]->host);
4802 }
4803 err_free_ret:
4804 kfree(host_set);
4805 VPRINTK("EXIT, returning 0\n");
4806 return 0;
4807 }
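
/*
 * An illustrative probe path (sketch only; every "foo" identifier is
 * hypothetical and the mode masks are placeholders).  A low-level
 * driver fills in an ata_probe_ent and hands it to ata_device_add():
 *
 *	probe_ent->dev = pci_dev_to_dev(pdev);
 *	probe_ent->sht = &foo_sht;
 *	probe_ent->port_ops = &foo_port_ops;
 *	probe_ent->n_ports = 2;
 *	probe_ent->irq = pdev->irq;
 *	probe_ent->irq_flags = SA_SHIRQ;
 *	probe_ent->pio_mask = 0x1f;
 *	foo_fill_ioaddr(&probe_ent->port[0]);
 *	foo_fill_ioaddr(&probe_ent->port[1]);
 *
 *	if (!ata_device_add(probe_ent))
 *		return -ENODEV;
 *
 * A return of zero means no ports were registered (see above).
 */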
4808
4809 /**
4810 * ata_host_set_remove - PCI layer callback for device removal
4811 * @host_set: ATA host set that was removed
4812 *
4813 * Unregister all objects associated with this host set. Free those
4814 * objects.
4815 *
4816 * LOCKING:
4817 * Inherited from calling layer (may sleep).
4818 */
4819
4820 void ata_host_set_remove(struct ata_host_set *host_set)
4821 {
4822 struct ata_port *ap;
4823 unsigned int i;
4824
4825 for (i = 0; i < host_set->n_ports; i++) {
4826 ap = host_set->ports[i];
4827 scsi_remove_host(ap->host);
4828 }
4829
4830 free_irq(host_set->irq, host_set);
4831
4832 for (i = 0; i < host_set->n_ports; i++) {
4833 ap = host_set->ports[i];
4834
4835 ata_scsi_release(ap->host);
4836
4837 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4838 struct ata_ioports *ioaddr = &ap->ioaddr;
4839
4840 if (ioaddr->cmd_addr == 0x1f0)
4841 release_region(0x1f0, 8);
4842 else if (ioaddr->cmd_addr == 0x170)
4843 release_region(0x170, 8);
4844 }
4845
4846 scsi_host_put(ap->host);
4847 }
4848
4849 if (host_set->ops->host_stop)
4850 host_set->ops->host_stop(host_set);
4851
4852 kfree(host_set);
4853 }
4854
4855 /**
4856 * ata_scsi_release - SCSI layer callback hook for host unload
4857 * @host: libata host to be unloaded
4858 *
4859 * Performs all duties necessary to shut down a libata port...
4860 * Kill port kthread, disable port, and release resources.
4861 *
4862 * LOCKING:
4863 * Inherited from SCSI layer.
4864 *
4865 * RETURNS:
4866 * One.
4867 */
4868
4869 int ata_scsi_release(struct Scsi_Host *host)
4870 {
4871 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4872 int i;
4873
4874 DPRINTK("ENTER\n");
4875
4876 ap->ops->port_disable(ap);
4877 ata_host_remove(ap, 0);
4878 for (i = 0; i < ATA_MAX_DEVICES; i++)
4879 kfree(ap->device[i].id);
4880
4881 DPRINTK("EXIT\n");
4882 return 1;
4883 }
4884
4885 /**
4886 * ata_std_ports - Initialize ioaddr with standard port offsets.
4887 * @ioaddr: IO address structure to be initialized
4888 *
4889 * Utility function which initializes data_addr, error_addr,
4890 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4891 * device_addr, status_addr, and command_addr to standard offsets
4892 * relative to cmd_addr.
4893 *
4894 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4895 */
4896
4897 void ata_std_ports(struct ata_ioports *ioaddr)
4898 {
4899 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4900 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4901 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4902 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4903 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4904 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4905 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4906 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4907 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4908 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4909 }
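
/*
 * Example (sketch; the addresses are the legacy primary-channel
 * defaults, used here purely for illustration):
 *
 *	struct ata_ioports *ioaddr = &probe_ent->port[0];
 *
 *	ioaddr->cmd_addr = 0x1f0;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr = 0x3f6;
 *	ata_std_ports(ioaddr);
 *
 * After the call, the taskfile register addresses listed above are all
 * filled in relative to cmd_addr.
 */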
4910
4911
4912 #ifdef CONFIG_PCI
4913
4914 void ata_pci_host_stop (struct ata_host_set *host_set)
4915 {
4916 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4917
4918 pci_iounmap(pdev, host_set->mmio_base);
4919 }
4920
4921 /**
4922 * ata_pci_remove_one - PCI layer callback for device removal
4923 * @pdev: PCI device that was removed
4924 *
4925 * PCI layer indicates to libata via this hook that
4926 * hot-unplug or module unload event has occurred.
4927 * Handle this by unregistering all objects associated
4928 * with this PCI device. Free those objects. Then finally
4929 * release PCI resources and disable device.
4930 *
4931 * LOCKING:
4932 * Inherited from PCI layer (may sleep).
4933 */
4934
4935 void ata_pci_remove_one (struct pci_dev *pdev)
4936 {
4937 struct device *dev = pci_dev_to_dev(pdev);
4938 struct ata_host_set *host_set = dev_get_drvdata(dev);
4939
4940 ata_host_set_remove(host_set);
4941 pci_release_regions(pdev);
4942 pci_disable_device(pdev);
4943 dev_set_drvdata(dev, NULL);
4944 }
4945
4946 /* move to PCI subsystem */
4947 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4948 {
4949 unsigned long tmp = 0;
4950
4951 switch (bits->width) {
4952 case 1: {
4953 u8 tmp8 = 0;
4954 pci_read_config_byte(pdev, bits->reg, &tmp8);
4955 tmp = tmp8;
4956 break;
4957 }
4958 case 2: {
4959 u16 tmp16 = 0;
4960 pci_read_config_word(pdev, bits->reg, &tmp16);
4961 tmp = tmp16;
4962 break;
4963 }
4964 case 4: {
4965 u32 tmp32 = 0;
4966 pci_read_config_dword(pdev, bits->reg, &tmp32);
4967 tmp = tmp32;
4968 break;
4969 }
4970
4971 default:
4972 return -EINVAL;
4973 }
4974
4975 tmp &= bits->mask;
4976
4977 return (tmp == bits->val) ? 1 : 0;
4978 }
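
/*
 * Illustrative use (sketch): the register offset and mask below are
 * placeholders, not taken from a real controller.  A driver describes
 * the bits to test and rejects the port if they do not match:
 *
 *	static const struct pci_bits foo_enable_bits = {
 *		.reg	= 0x41,
 *		.width	= 1,
 *		.mask	= 0x80,
 *		.val	= 0x80,
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENODEV;
 */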
4979
4980 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4981 {
4982 pci_save_state(pdev);
4983 pci_disable_device(pdev);
4984 pci_set_power_state(pdev, PCI_D3hot);
4985 return 0;
4986 }
4987
4988 int ata_pci_device_resume(struct pci_dev *pdev)
4989 {
4990 pci_set_power_state(pdev, PCI_D0);
4991 pci_restore_state(pdev);
4992 pci_enable_device(pdev);
4993 pci_set_master(pdev);
4994 return 0;
4995 }
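
/*
 * ata_pci_device_suspend() and ata_pci_device_resume(), like
 * ata_pci_remove_one() above, are meant to be wired straight into a
 * driver's pci_driver.  A sketch with hypothetical "foo" names:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= foo_pci_tbl,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */
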
4996 #endif /* CONFIG_PCI */
4997
4998
4999 static int __init ata_init(void)
5000 {
5001 ata_wq = create_workqueue("ata");
5002 if (!ata_wq)
5003 return -ENOMEM;
5004
5005 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5006 return 0;
5007 }
5008
5009 static void __exit ata_exit(void)
5010 {
5011 destroy_workqueue(ata_wq);
5012 }
5013
5014 module_init(ata_init);
5015 module_exit(ata_exit);
5016
5017 static unsigned long ratelimit_time;
5018 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5019
5020 int ata_ratelimit(void)
5021 {
5022 int rc;
5023 unsigned long flags;
5024
5025 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5026
5027 if (time_after(jiffies, ratelimit_time)) {
5028 rc = 1;
5029 ratelimit_time = jiffies + (HZ/5);
5030 } else
5031 rc = 0;
5032
5033 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5034
5035 return rc;
5036 }
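
/*
 * ata_ratelimit() grants at most one message per HZ/5 jiffies, i.e.
 * roughly five per second.  Typical use is throttling diagnostics
 * emitted from interrupt context, e.g. (sketch):
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */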
5037
5038 /*
5039 * libata is essentially a library of internal helper functions for
5040 * low-level ATA host controller drivers. As such, the API/ABI is
5041 * likely to change as new drivers are added and updated.
5042 * Do not depend on ABI/API stability.
5043 */
5044
5045 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5046 EXPORT_SYMBOL_GPL(ata_std_ports);
5047 EXPORT_SYMBOL_GPL(ata_device_add);
5048 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5049 EXPORT_SYMBOL_GPL(ata_sg_init);
5050 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5051 EXPORT_SYMBOL_GPL(__ata_qc_complete);
5052 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5053 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5054 EXPORT_SYMBOL_GPL(ata_tf_load);
5055 EXPORT_SYMBOL_GPL(ata_tf_read);
5056 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5057 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5058 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5059 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5060 EXPORT_SYMBOL_GPL(ata_check_status);
5061 EXPORT_SYMBOL_GPL(ata_altstatus);
5062 EXPORT_SYMBOL_GPL(ata_exec_command);
5063 EXPORT_SYMBOL_GPL(ata_port_start);
5064 EXPORT_SYMBOL_GPL(ata_port_stop);
5065 EXPORT_SYMBOL_GPL(ata_host_stop);
5066 EXPORT_SYMBOL_GPL(ata_interrupt);
5067 EXPORT_SYMBOL_GPL(ata_qc_prep);
5068 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5069 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5070 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5071 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5072 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5073 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5074 EXPORT_SYMBOL_GPL(ata_port_probe);
5075 EXPORT_SYMBOL_GPL(sata_phy_reset);
5076 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5077 EXPORT_SYMBOL_GPL(ata_bus_reset);
5078 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5079 EXPORT_SYMBOL_GPL(ata_std_softreset);
5080 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5081 EXPORT_SYMBOL_GPL(ata_std_postreset);
5082 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5083 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5084 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5085 EXPORT_SYMBOL_GPL(ata_port_disable);
5086 EXPORT_SYMBOL_GPL(ata_ratelimit);
5087 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5088 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5089 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5090 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5091 EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
5092 EXPORT_SYMBOL_GPL(ata_scsi_error);
5093 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5094 EXPORT_SYMBOL_GPL(ata_scsi_release);
5095 EXPORT_SYMBOL_GPL(ata_host_intr);
5096 EXPORT_SYMBOL_GPL(ata_dev_classify);
5097 EXPORT_SYMBOL_GPL(ata_id_string);
5098 EXPORT_SYMBOL_GPL(ata_id_c_string);
5099 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5100 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5101 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5102
5103 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5104 EXPORT_SYMBOL_GPL(ata_timing_compute);
5105 EXPORT_SYMBOL_GPL(ata_timing_merge);
5106
5107 #ifdef CONFIG_PCI
5108 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5109 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5110 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5111 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5112 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5113 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5114 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5115 #endif /* CONFIG_PCI */
5116
5117 EXPORT_SYMBOL_GPL(ata_device_suspend);
5118 EXPORT_SYMBOL_GPL(ata_device_resume);
5119 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5120 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);