/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25, 500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
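/*
 * Illustrative usage note (not part of the original file): when libata is
 * built into the kernel, the parameters declared above are typically set on
 * the kernel command line with a "libata." prefix, for example
 * "libata.dma=3 libata.ignore_hpa=1 libata.noacpi=1"; when built as a
 * module they can be given at load time, e.g. "modprobe libata dma=3".
 */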
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[13] = tf->hob_nsect;
}
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->hob_nsect = fis[13];
}
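/*
 * Illustrative sketch (not part of the original file): building a Register -
 * Host to Device FIS for a READ SECTOR(S) command on port multiplier port 0.
 * The 20-byte buffer matches the H2D register FIS size defined by the SATA
 * spec; the taskfile values here are made up for the example.
 */
static void __maybe_unused example_build_h2d_fis(void)
{
	struct ata_taskfile tf = { };
	u8 fis[20];

	tf.command = ATA_CMD_PIO_READ;	/* 0x20, READ SECTOR(S) */
	tf.nsect   = 1;
	tf.lbal    = 0x10;
	tf.device  = ATA_LBA;

	ata_tf_to_fis(&tf, 0, 1, fis);	/* pmp 0, is_cmd set */
}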
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	/* dma */
	ATA_CMD_WRITE_FUA_EXT
};
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;
	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
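/*
 * Illustrative note (not part of the original file): the ata_rw_cmds[] table
 * above is looked up in ata_rwcmd_protocol() with index + fua + lba48 + write,
 * where fua contributes 4, lba48 contributes 2 and write contributes 1.  For
 * example, an LBA48 FUA write selects the entry at index + 4 + 2 + 1, which is
 * why the FUA_EXT opcodes sit seven slots after the start of each group.
 */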
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
	}

	return 0;
}
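/*
 * Illustrative worked example (not part of the original file) of the LBA to
 * CHS conversion above, assuming a translation of 16 heads and 63 sectors
 * per track: for block 5000, track = 5000 / 63 = 79, cyl = 79 / 16 = 4,
 * head = 79 % 16 = 15 and sect = 5000 % 63 + 1 = 24, i.e. CHS (4, 15, 24),
 * which passes the range check since cyl < 65536, head < 16 and sect < 256.
 */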
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
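/*
 * Illustrative sketch (not part of the original file): packing the PIO and
 * UDMA capability bits of a hypothetical device into one xfer_mask and
 * splitting it apart again.  The mask values are made up for the example.
 */
static void __maybe_unused example_xfermask_roundtrip(void)
{
	unsigned int xfer_mask, pio, mwdma, udma;

	/* PIO0-4, no MWDMA, UDMA0-5 */
	xfer_mask = ata_pack_xfermask(0x1f, 0x00, 0x3f);

	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* here pio == 0x1f, mwdma == 0x00, udma == 0x3f */
}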
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @modemask).
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		/* ... */
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		/* ... */
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
	}
}
static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because Disks are too stupid to know that
	 * If the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL, and instead it
	 * just would give up.  So, for medium_power to
	 * work at all, we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void)err_mask;

	return 0;
}
/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev:  device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, call driver specific callbacks for disabling Host
 *	Initiated Power management.
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EHI_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}
static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
	 * spec has never mentioned about using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
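/*
 * Illustrative sketch (not part of the original file): classifying a device
 * from the signature left in the shadow registers after reset.  The values
 * below are the ATAPI signature, so this yields ATA_DEV_ATAPI.
 */
static void __maybe_unused example_classify_by_signature(void)
{
	struct ata_taskfile tf = { };

	tf.lbam = 0x14;
	tf.lbah = 0xeb;

	if (ata_dev_classify(&tf) == ATA_DEV_ATAPI) {
		/* device answered with the ATAPI signature */
	}
}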
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
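/*
 * Illustrative sketch (not part of the original file): pulling the model and
 * firmware revision strings out of raw IDENTIFY data with ata_id_c_string().
 * ATA_ID_PROD/ATA_ID_FW_REV and their _LEN constants come from <linux/ata.h>.
 */
static void __maybe_unused example_identify_strings(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];
	unsigned char fwrev[ATA_ID_FW_REV_LEN + 1];

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
}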
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}
static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
/**
 *	ata_hpa_resize		-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
/**
 *	ata_id_to_dma_mode	-	Identify DMA mode from id block
 *	@dev: device to identify
 *	@unknown: mode to assume if we cannot tell
 *
 *	Set up the timing values for the device based upon the identify
 *	reported values for the DMA mode. This function is used by drivers
 *	which rely upon firmware configured modes, but wish to report the
 *	mode correctly when possible.
 *
 *	In addition we emit similarly formatted messages to the default
 *	ata_dev_set_mode handler, in order to provide consistency of
 *	presentation.
 */
void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode)
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}
/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 */
void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);		/* needed; also flushes, for mmio */
}
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	ap->ops->dev_select(ap, device);

	if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
		msleep(150);
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		/* ... */);
	DPRINTK("80==0x%04x "
		/* ... */);
	DPRINTK("88==0x%04x "
		/* ... */);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}
/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}
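/*
 * Illustrative sketch (not part of the original file): issuing a bare
 * FLUSH CACHE command through ata_do_simple_cmd() and checking the
 * returned AC_ERR_* mask.
 */
static void __maybe_unused example_simple_flush(struct ata_device *dev)
{
	unsigned int err_mask;

	err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
	if (err_mask)
		ata_dev_printk(dev, KERN_WARNING,
			       "FLUSH failed (err_mask=0x%x)\n", err_mask);
}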
/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		ata_dev_printk(dev, KERN_WARNING,
			"Drive reports diagnostics failure. This may indicate a drive\n");
		ata_dev_printk(dev, KERN_WARNING,
			"fault or invalid emulation. Contact drive vendor for information.\n");
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
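
/*
 * Illustrative sketch (not part of this file): a PATA LLDD whose board has
 * no cable detection hardware would typically point its ->cable_detect hook
 * at one of the helpers above.  The ops structure below is hypothetical and
 * shows only the one field of interest; a real driver fills in many more.
 */
#if 0	/* example only */
static struct ata_port_operations example_pata_ops = {
	/* ... taskfile, command and DMA hooks elided ... */
	.cable_detect	= ata_cable_40wire,	/* board is hardwired 40-wire */
};
#endif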
2400 * ata_bus_probe - Reset and probe ATA bus
2403 * Master ATA bus probing function. Initiates a hardware-dependent
2404 * bus reset, then attempts to identify any devices found on
2408 * PCI/etc. bus probe sem.
2411 * Zero on success, negative errno otherwise.
2414 int ata_bus_probe(struct ata_port
*ap
)
2416 unsigned int classes
[ATA_MAX_DEVICES
];
2417 int tries
[ATA_MAX_DEVICES
];
2419 struct ata_device
*dev
;
2423 ata_link_for_each_dev(dev
, &ap
->link
)
2424 tries
[dev
->devno
] = ATA_PROBE_MAX_TRIES
;
2427 ata_link_for_each_dev(dev
, &ap
->link
) {
2428 /* If we issue an SRST then an ATA drive (not ATAPI)
2429 * may change configuration and be in PIO0 timing. If
2430 * we do a hard reset (or are coming from power on)
2431 * this is true for ATA or ATAPI. Until we've set a
2432 * suitable controller mode we should not touch the
2433 * bus as we may be talking too fast.
2435 dev
->pio_mode
= XFER_PIO_0
;
2437 /* If the controller has a pio mode setup function
2438 * then use it to set the chipset to rights. Don't
2439 * touch the DMA setup as that will be dealt with when
2440 * configuring devices.
2442 if (ap
->ops
->set_piomode
)
2443 ap
->ops
->set_piomode(ap
, dev
);
2446 /* reset and determine device classes */
2447 ap
->ops
->phy_reset(ap
);
2449 ata_link_for_each_dev(dev
, &ap
->link
) {
2450 if (!(ap
->flags
& ATA_FLAG_DISABLED
) &&
2451 dev
->class != ATA_DEV_UNKNOWN
)
2452 classes
[dev
->devno
] = dev
->class;
2454 classes
[dev
->devno
] = ATA_DEV_NONE
;
2456 dev
->class = ATA_DEV_UNKNOWN
;
2461 /* read IDENTIFY page and configure devices. We have to do the identify
2462 specific sequence bass-ackwards so that PDIAG- is released by
2465 ata_link_for_each_dev(dev
, &ap
->link
) {
2466 if (tries
[dev
->devno
])
2467 dev
->class = classes
[dev
->devno
];
2469 if (!ata_dev_enabled(dev
))
2472 rc
= ata_dev_read_id(dev
, &dev
->class, ATA_READID_POSTRESET
,
2478 /* Now ask for the cable type as PDIAG- should have been released */
2479 if (ap
->ops
->cable_detect
)
2480 ap
->cbl
= ap
->ops
->cable_detect(ap
);
2482 /* We may have SATA bridge glue hiding here irrespective of the
2483 reported cable types and sensed types */
2484 ata_link_for_each_dev(dev
, &ap
->link
) {
2485 if (!ata_dev_enabled(dev
))
2487 /* SATA drives indicate we have a bridge. We don't know which
2488 end of the link the bridge is which is a problem */
2489 if (ata_id_is_sata(dev
->id
))
2490 ap
->cbl
= ATA_CBL_SATA
;
2493 /* After the identify sequence we can now set up the devices. We do
2494 this in the normal order so that the user doesn't get confused */
2496 ata_link_for_each_dev(dev
, &ap
->link
) {
2497 if (!ata_dev_enabled(dev
))
2500 ap
->link
.eh_context
.i
.flags
|= ATA_EHI_PRINTINFO
;
2501 rc
= ata_dev_configure(dev
);
2502 ap
->link
.eh_context
.i
.flags
&= ~ATA_EHI_PRINTINFO
;
2507 /* configure transfer mode */
2508 rc
= ata_set_mode(&ap
->link
, &dev
);
2512 ata_link_for_each_dev(dev
, &ap
->link
)
2513 if (ata_dev_enabled(dev
))
2516 /* no device present, disable port */
2517 ata_port_disable(ap
);
2521 tries
[dev
->devno
]--;
2525 /* eeek, something went very wrong, give up */
2526 tries
[dev
->devno
] = 0;
2530 /* give it just one more chance */
2531 tries
[dev
->devno
] = min(tries
[dev
->devno
], 1);
2533 if (tries
[dev
->devno
] == 1) {
2534 /* This is the last chance, better to slow
2535 * down than lose it.
2537 sata_down_spd_limit(&ap
->link
);
2538 ata_down_xfermask_limit(dev
, ATA_DNXFER_PIO
);
2542 if (!tries
[dev
->devno
])
2543 ata_dev_disable(dev
);
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@link: SATA link to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_link_online(link)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_link_printk(link, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_printk(link, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
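
/*
 * Worked example of the SStatus decode above (the register value is made
 * up): with SStatus == 0x123, DET == 0x3 (device present, PHY online) and
 * (sstatus >> 4) & 0xf == 2, so sata_spd_string() would typically report a
 * 3.0 Gbps link, i.e. "SATA link up 3.0 Gbps (SStatus 123 SControl ...)".
 * Bits 7:4 of SStatus carry the negotiated interface speed generation.
 */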
/**
 *	ata_dev_pair		-	return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned
 */

struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_link *link = adev->link;
	struct ata_device *pair = &link->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
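
/*
 * Worked example for the limit arithmetic above (values are hypothetical):
 * sata_spd_limit is a bitmask of allowed generations, bit 0 = 1.5 Gbps and
 * bit 1 = 3.0 Gbps.  With link->sata_spd_limit == 0x3 and the link running
 * at spd == 2 (3.0 Gbps), fls(0x3) - 1 == 1 so the high bit is cleared,
 * giving 0x1, and 0x1 & ((1 << (2 - 1)) - 1) == 0x1.  The new limit allows
 * only 1.5 Gbps, so the warning prints "limiting SATA link speed to 1.5 Gbps".
 */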
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	if (limit == UINT_MAX)
		target = 0;
	else
		target = fls(limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* if SControl cannot be read, conservatively assume
	 * reconfiguration is needed
	 */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {
	/* { mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma } */

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

static void ata_timing_quantize(const struct ata_timing *t,
				struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
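
/*
 * Worked example for ENOUGH()/EZ() (a sketch; the clock convention is an
 * assumption about the callers): PATA drivers typically pass T as the bus
 * clock period in the same scaled units the multiplication by 1000 above
 * produces, so a 33 MHz clock corresponds to T of roughly 30000.  A 290 ns
 * cyc8b value then quantizes to EZ(290 * 1000, 30000) == ENOUGH(290000,
 * 30000) == (290000 - 1)/30000 + 1 == 10 clocks, i.e. durations are always
 * rounded *up* to whole clocks, while EZ() leaves unspecified (zero) fields
 * at zero.
 */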
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
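
/*
 * Illustrative sketch of a caller (hypothetical driver, not from this
 * file): a PATA LLDD's ->set_piomode hook typically computes quantized
 * timings for the selected mode and then programs them into chipset
 * registers.  The T value assumes a 33 MHz bus clock and UT is chosen
 * purely for illustration; real drivers derive both from their hardware.
 */
#if 0	/* example only */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	int T = 1000000000 / 33333;	/* 33 MHz clock period, assumed units */
	int UT = T;			/* UDMA quantum, illustrative only */

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, UT) == 0) {
		/* program t.setup / t.active / t.recover into the chipset */
	}
}
#endif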
2928 * ata_down_xfermask_limit - adjust dev xfer masks downward
2929 * @dev: Device to adjust xfer masks
2930 * @sel: ATA_DNXFER_* selector
2932 * Adjust xfer masks of @dev downward. Note that this function
2933 * does not apply the change. Invoking ata_set_mode() afterwards
2934 * will apply the limit.
2937 * Inherited from caller.
2940 * 0 on success, negative errno on failure
2942 int ata_down_xfermask_limit(struct ata_device
*dev
, unsigned int sel
)
2945 unsigned int orig_mask
, xfer_mask
;
2946 unsigned int pio_mask
, mwdma_mask
, udma_mask
;
2949 quiet
= !!(sel
& ATA_DNXFER_QUIET
);
2950 sel
&= ~ATA_DNXFER_QUIET
;
2952 xfer_mask
= orig_mask
= ata_pack_xfermask(dev
->pio_mask
,
2955 ata_unpack_xfermask(xfer_mask
, &pio_mask
, &mwdma_mask
, &udma_mask
);
2958 case ATA_DNXFER_PIO
:
2959 highbit
= fls(pio_mask
) - 1;
2960 pio_mask
&= ~(1 << highbit
);
2963 case ATA_DNXFER_DMA
:
2965 highbit
= fls(udma_mask
) - 1;
2966 udma_mask
&= ~(1 << highbit
);
2969 } else if (mwdma_mask
) {
2970 highbit
= fls(mwdma_mask
) - 1;
2971 mwdma_mask
&= ~(1 << highbit
);
2977 case ATA_DNXFER_40C
:
2978 udma_mask
&= ATA_UDMA_MASK_40C
;
2981 case ATA_DNXFER_FORCE_PIO0
:
2983 case ATA_DNXFER_FORCE_PIO
:
2992 xfer_mask
&= ata_pack_xfermask(pio_mask
, mwdma_mask
, udma_mask
);
2994 if (!(xfer_mask
& ATA_MASK_PIO
) || xfer_mask
== orig_mask
)
2998 if (xfer_mask
& (ATA_MASK_MWDMA
| ATA_MASK_UDMA
))
2999 snprintf(buf
, sizeof(buf
), "%s:%s",
3000 ata_mode_string(xfer_mask
),
3001 ata_mode_string(xfer_mask
& ATA_MASK_PIO
));
3003 snprintf(buf
, sizeof(buf
), "%s",
3004 ata_mode_string(xfer_mask
));
3006 ata_dev_printk(dev
, KERN_WARNING
,
3007 "limiting speed to %s\n", buf
);
3010 ata_unpack_xfermask(xfer_mask
, &dev
->pio_mask
, &dev
->mwdma_mask
,
3016 static int ata_dev_set_mode(struct ata_device
*dev
)
3018 struct ata_eh_context
*ehc
= &dev
->link
->eh_context
;
3019 unsigned int err_mask
;
3022 dev
->flags
&= ~ATA_DFLAG_PIO
;
3023 if (dev
->xfer_shift
== ATA_SHIFT_PIO
)
3024 dev
->flags
|= ATA_DFLAG_PIO
;
3026 err_mask
= ata_dev_set_xfermode(dev
);
3028 /* Old CFA may refuse this command, which is just fine */
3029 if (dev
->xfer_shift
== ATA_SHIFT_PIO
&& ata_id_is_cfa(dev
->id
))
3030 err_mask
&= ~AC_ERR_DEV
;
3032 /* Some very old devices and some bad newer ones fail any kind of
3033 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3034 if (dev
->xfer_shift
== ATA_SHIFT_PIO
&& !ata_id_has_iordy(dev
->id
) &&
3035 dev
->pio_mode
<= XFER_PIO_2
)
3036 err_mask
&= ~AC_ERR_DEV
;
3038 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3039 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3040 if (dev
->xfer_shift
== ATA_SHIFT_MWDMA
&&
3041 dev
->dma_mode
== XFER_MW_DMA_0
&&
3042 (dev
->id
[63] >> 8) & 1)
3043 err_mask
&= ~AC_ERR_DEV
;
3046 ata_dev_printk(dev
, KERN_ERR
, "failed to set xfermode "
3047 "(err_mask=0x%x)\n", err_mask
);
3051 ehc
->i
.flags
|= ATA_EHI_POST_SETMODE
;
3052 rc
= ata_dev_revalidate(dev
, ATA_DEV_UNKNOWN
, 0);
3053 ehc
->i
.flags
&= ~ATA_EHI_POST_SETMODE
;
3057 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3058 dev
->xfer_shift
, (int)dev
->xfer_mode
);
3060 ata_dev_printk(dev
, KERN_INFO
, "configured for %s\n",
3061 ata_mode_string(ata_xfer_mode2mask(dev
->xfer_mode
)));
3066 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3067 * @link: link on which timings will be programmed
3068 * @r_failed_dev: out paramter for failed device
3070 * Standard implementation of the function used to tune and set
3071 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3072 * ata_dev_set_mode() fails, pointer to the failing device is
3073 * returned in @r_failed_dev.
3076 * PCI/etc. bus probe sem.
3079 * 0 on success, negative errno otherwise
3082 int ata_do_set_mode(struct ata_link
*link
, struct ata_device
**r_failed_dev
)
3084 struct ata_port
*ap
= link
->ap
;
3085 struct ata_device
*dev
;
3086 int rc
= 0, used_dma
= 0, found
= 0;
3088 /* step 1: calculate xfer_mask */
3089 ata_link_for_each_dev(dev
, link
) {
3090 unsigned int pio_mask
, dma_mask
;
3091 unsigned int mode_mask
;
3093 if (!ata_dev_enabled(dev
))
3096 mode_mask
= ATA_DMA_MASK_ATA
;
3097 if (dev
->class == ATA_DEV_ATAPI
)
3098 mode_mask
= ATA_DMA_MASK_ATAPI
;
3099 else if (ata_id_is_cfa(dev
->id
))
3100 mode_mask
= ATA_DMA_MASK_CFA
;
3102 ata_dev_xfermask(dev
);
3104 pio_mask
= ata_pack_xfermask(dev
->pio_mask
, 0, 0);
3105 dma_mask
= ata_pack_xfermask(0, dev
->mwdma_mask
, dev
->udma_mask
);
3107 if (libata_dma_mask
& mode_mask
)
3108 dma_mask
= ata_pack_xfermask(0, dev
->mwdma_mask
, dev
->udma_mask
);
3112 dev
->pio_mode
= ata_xfer_mask2mode(pio_mask
);
3113 dev
->dma_mode
= ata_xfer_mask2mode(dma_mask
);
3122 /* step 2: always set host PIO timings */
3123 ata_link_for_each_dev(dev
, link
) {
3124 if (!ata_dev_enabled(dev
))
3127 if (!dev
->pio_mode
) {
3128 ata_dev_printk(dev
, KERN_WARNING
, "no PIO support\n");
3133 dev
->xfer_mode
= dev
->pio_mode
;
3134 dev
->xfer_shift
= ATA_SHIFT_PIO
;
3135 if (ap
->ops
->set_piomode
)
3136 ap
->ops
->set_piomode(ap
, dev
);
3139 /* step 3: set host DMA timings */
3140 ata_link_for_each_dev(dev
, link
) {
3141 if (!ata_dev_enabled(dev
) || !dev
->dma_mode
)
3144 dev
->xfer_mode
= dev
->dma_mode
;
3145 dev
->xfer_shift
= ata_xfer_mode2shift(dev
->dma_mode
);
3146 if (ap
->ops
->set_dmamode
)
3147 ap
->ops
->set_dmamode(ap
, dev
);
3150 /* step 4: update devices' xfer mode */
3151 ata_link_for_each_dev(dev
, link
) {
3152 /* don't update suspended devices' xfer mode */
3153 if (!ata_dev_enabled(dev
))
3156 rc
= ata_dev_set_mode(dev
);
3161 /* Record simplex status. If we selected DMA then the other
3162 * host channels are not permitted to do so.
3164 if (used_dma
&& (ap
->host
->flags
& ATA_HOST_SIMPLEX
))
3165 ap
->host
->simplex_claimed
= ap
;
3169 *r_failed_dev
= dev
;
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(link, r_failed_dev);
	return ata_do_set_mode(link, r_failed_dev);
}
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
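
/*
 * Illustrative sketch of a caller (not from this file): both timeouts are
 * given in jiffies, so callers scale with HZ.  The ATA_TMOUT_* constants
 * below are assumed to be the boot-time values from <linux/libata.h>; treat
 * the names as an assumption if they have since changed.
 */
#if 0	/* example only */
static int example_wait_not_busy(struct ata_port *ap)
{
	/* whine after the "impatience" timeout, give up at the overall one */
	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}
#endif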
3274 * ata_wait_after_reset - wait before checking status after reset
3275 * @ap: port containing status register to be polled
3276 * @deadline: deadline jiffies for the operation
3278 * After reset, we need to pause a while before reading status.
3279 * Also, certain combination of controller and device report 0xff
3280 * for some duration (e.g. until SATA PHY is up and running)
3281 * which is interpreted as empty port in ATA world. This
3282 * function also waits for such devices to get out of 0xff
3286 * Kernel thread context (may sleep).
3288 void ata_wait_after_reset(struct ata_port
*ap
, unsigned long deadline
)
3290 unsigned long until
= jiffies
+ ATA_TMOUT_FF_WAIT
;
3292 if (time_before(until
, deadline
))
3295 /* Spec mandates ">= 2ms" before checking status. We wait
3296 * 150ms, because that was the magic delay used for ATAPI
3297 * devices in Hale Landis's ATADRVR, for the period of time
3298 * between when the ATA command register is written, and then
3299 * status is checked. Because waiting for "a while" before
3300 * checking status is fine, post SRST, we perform this magic
3301 * delay here as well.
3303 * Old drivers/ide uses the 2mS rule and then waits for ready.
3307 /* Wait for 0xff to clear. Some SATA devices take a long time
3308 * to clear 0xff after reset. For example, HHD424020F7SV00
3309 * iVDR needs >= 800ms while. Quantum GoVault needs even more
3312 * Note that some PATA controllers (pata_ali) explode if
3313 * status register is read more than once when there's no
3316 if (ap
->flags
& ATA_FLAG_SATA
) {
3318 u8 status
= ata_chk_status(ap
);
3320 if (status
!= 0xff || time_after(jiffies
, deadline
))
3329 * ata_wait_ready - sleep until BSY clears, or timeout
3330 * @ap: port containing status register to be polled
3331 * @deadline: deadline jiffies for the operation
3333 * Sleep until ATA Status register bit BSY clears, or timeout
3337 * Kernel thread context (may sleep).
3340 * 0 on success, -errno otherwise.
3342 int ata_wait_ready(struct ata_port
*ap
, unsigned long deadline
)
3344 unsigned long start
= jiffies
;
3348 u8 status
= ata_chk_status(ap
);
3349 unsigned long now
= jiffies
;
3351 if (!(status
& ATA_BUSY
))
3353 if (!ata_link_online(&ap
->link
) && status
== 0xff)
3355 if (time_after(now
, deadline
))
3358 if (!warned
&& time_after(now
, start
+ 5 * HZ
) &&
3359 (deadline
- now
> 3 * HZ
)) {
3360 ata_port_printk(ap
, KERN_WARNING
,
3361 "port is slow to respond, please be patient "
3362 "(Status 0x%x)\n", status
);
3370 static int ata_bus_post_reset(struct ata_port
*ap
, unsigned int devmask
,
3371 unsigned long deadline
)
3373 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
3374 unsigned int dev0
= devmask
& (1 << 0);
3375 unsigned int dev1
= devmask
& (1 << 1);
3378 /* if device 0 was found in ata_devchk, wait for its
3382 rc
= ata_wait_ready(ap
, deadline
);
3390 /* if device 1 was found in ata_devchk, wait for register
3391 * access briefly, then wait for BSY to clear.
3396 ap
->ops
->dev_select(ap
, 1);
3398 /* Wait for register access. Some ATAPI devices fail
3399 * to set nsect/lbal after reset, so don't waste too
3400 * much time on it. We're gonna wait for !BSY anyway.
3402 for (i
= 0; i
< 2; i
++) {
3405 nsect
= ioread8(ioaddr
->nsect_addr
);
3406 lbal
= ioread8(ioaddr
->lbal_addr
);
3407 if ((nsect
== 1) && (lbal
== 1))
3409 msleep(50); /* give drive a breather */
3412 rc
= ata_wait_ready(ap
, deadline
);
3420 /* is all this really necessary? */
3421 ap
->ops
->dev_select(ap
, 0);
3423 ap
->ops
->dev_select(ap
, 1);
3425 ap
->ops
->dev_select(ap
, 0);
3430 static int ata_bus_softreset(struct ata_port
*ap
, unsigned int devmask
,
3431 unsigned long deadline
)
3433 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
3435 DPRINTK("ata%u: bus reset via SRST\n", ap
->print_id
);
3437 /* software reset. causes dev0 to be selected */
3438 iowrite8(ap
->ctl
, ioaddr
->ctl_addr
);
3439 udelay(20); /* FIXME: flush */
3440 iowrite8(ap
->ctl
| ATA_SRST
, ioaddr
->ctl_addr
);
3441 udelay(20); /* FIXME: flush */
3442 iowrite8(ap
->ctl
, ioaddr
->ctl_addr
);
3444 /* wait a while before checking status */
3445 ata_wait_after_reset(ap
, deadline
);
3447 /* Before we perform post reset processing we want to see if
3448 * the bus shows 0xFF because the odd clown forgets the D7
3449 * pulldown resistor.
3451 if (ata_chk_status(ap
) == 0xFF)
3454 return ata_bus_post_reset(ap
, devmask
, deadline
);
3458 * ata_bus_reset - reset host port and associated ATA channel
3459 * @ap: port to reset
3461 * This is typically the first time we actually start issuing
3462 * commands to the ATA channel. We wait for BSY to clear, then
3463 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3464 * result. Determine what devices, if any, are on the channel
3465 * by looking at the device 0/1 error register. Look at the signature
3466 * stored in each device's taskfile registers, to determine if
3467 * the device is ATA or ATAPI.
3470 * PCI/etc. bus probe sem.
3471 * Obtains host lock.
3474 * Sets ATA_FLAG_DISABLED if bus reset fails.
3477 void ata_bus_reset(struct ata_port
*ap
)
3479 struct ata_device
*device
= ap
->link
.device
;
3480 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
3481 unsigned int slave_possible
= ap
->flags
& ATA_FLAG_SLAVE_POSS
;
3483 unsigned int dev0
, dev1
= 0, devmask
= 0;
3486 DPRINTK("ENTER, host %u, port %u\n", ap
->print_id
, ap
->port_no
);
3488 /* determine if device 0/1 are present */
3489 if (ap
->flags
& ATA_FLAG_SATA_RESET
)
3492 dev0
= ata_devchk(ap
, 0);
3494 dev1
= ata_devchk(ap
, 1);
3498 devmask
|= (1 << 0);
3500 devmask
|= (1 << 1);
3502 /* select device 0 again */
3503 ap
->ops
->dev_select(ap
, 0);
3505 /* issue bus reset */
3506 if (ap
->flags
& ATA_FLAG_SRST
) {
3507 rc
= ata_bus_softreset(ap
, devmask
, jiffies
+ 40 * HZ
);
3508 if (rc
&& rc
!= -ENODEV
)
3513 * determine by signature whether we have ATA or ATAPI devices
3515 device
[0].class = ata_dev_try_classify(&device
[0], dev0
, &err
);
3516 if ((slave_possible
) && (err
!= 0x81))
3517 device
[1].class = ata_dev_try_classify(&device
[1], dev1
, &err
);
3519 /* is double-select really necessary? */
3520 if (device
[1].class != ATA_DEV_NONE
)
3521 ap
->ops
->dev_select(ap
, 1);
3522 if (device
[0].class != ATA_DEV_NONE
)
3523 ap
->ops
->dev_select(ap
, 0);
3525 /* if no devices were detected, disable this port */
3526 if ((device
[0].class == ATA_DEV_NONE
) &&
3527 (device
[1].class == ATA_DEV_NONE
))
3530 if (ap
->flags
& (ATA_FLAG_SATA_RESET
| ATA_FLAG_SRST
)) {
3531 /* set up device control for ATA_FLAG_SATA_RESET */
3532 iowrite8(ap
->ctl
, ioaddr
->ctl_addr
);
3539 ata_port_printk(ap
, KERN_ERR
, "disabling port\n");
3540 ata_port_disable(ap
);
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
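
/*
 * Worked example for the parameter triple (values are made up, not one of
 * the predefined tables): with params == { 10, 200, 1000 } the PHY's DET
 * field is polled every 10 ms, it must hold the same non-1 value for 200 ms
 * to count as stable, and the whole exercise is bounded by 1000 ms (further
 * clamped by @deadline).  A DET value stuck at 1 simply runs out the clock
 * and still returns 0, as described in the comment above.
 */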
3615 * sata_link_resume - resume SATA link
3616 * @link: ATA link to resume SATA
3617 * @params: timing parameters { interval, duratinon, timeout } in msec
3618 * @deadline: deadline jiffies for the operation
3620 * Resume SATA phy @link and debounce it.
3623 * Kernel thread context (may sleep)
3626 * 0 on success, -errno on failure.
3628 int sata_link_resume(struct ata_link
*link
, const unsigned long *params
,
3629 unsigned long deadline
)
3634 if ((rc
= sata_scr_read(link
, SCR_CONTROL
, &scontrol
)))
3637 scontrol
= (scontrol
& 0x0f0) | 0x300;
3639 if ((rc
= sata_scr_write(link
, SCR_CONTROL
, scontrol
)))
3642 /* Some PHYs react badly if SStatus is pounded immediately
3643 * after resuming. Delay 200ms before debouncing.
3647 return sata_link_debounce(link
, params
, deadline
);
3651 * ata_std_prereset - prepare for reset
3652 * @link: ATA link to be reset
3653 * @deadline: deadline jiffies for the operation
3655 * @link is about to be reset. Initialize it. Failure from
3656 * prereset makes libata abort whole reset sequence and give up
3657 * that port, so prereset should be best-effort. It does its
3658 * best to prepare for reset sequence but if things go wrong, it
3659 * should just whine, not fail.
3662 * Kernel thread context (may sleep)
3665 * 0 on success, -errno otherwise.
3667 int ata_std_prereset(struct ata_link
*link
, unsigned long deadline
)
3669 struct ata_port
*ap
= link
->ap
;
3670 struct ata_eh_context
*ehc
= &link
->eh_context
;
3671 const unsigned long *timing
= sata_ehc_deb_timing(ehc
);
3674 /* handle link resume */
3675 if ((ehc
->i
.flags
& ATA_EHI_RESUME_LINK
) &&
3676 (link
->flags
& ATA_LFLAG_HRST_TO_RESUME
))
3677 ehc
->i
.action
|= ATA_EH_HARDRESET
;
3679 /* Some PMPs don't work with only SRST, force hardreset if PMP
3682 if (ap
->flags
& ATA_FLAG_PMP
)
3683 ehc
->i
.action
|= ATA_EH_HARDRESET
;
3685 /* if we're about to do hardreset, nothing more to do */
3686 if (ehc
->i
.action
& ATA_EH_HARDRESET
)
3689 /* if SATA, resume link */
3690 if (ap
->flags
& ATA_FLAG_SATA
) {
3691 rc
= sata_link_resume(link
, timing
, deadline
);
3692 /* whine about phy resume failure but proceed */
3693 if (rc
&& rc
!= -EOPNOTSUPP
)
3694 ata_link_printk(link
, KERN_WARNING
, "failed to resume "
3695 "link for reset (errno=%d)\n", rc
);
3698 /* Wait for !BSY if the controller can wait for the first D2H
3699 * Reg FIS and we don't know that no device is attached.
3701 if (!(link
->flags
& ATA_LFLAG_SKIP_D2H_BSY
) && !ata_link_offline(link
)) {
3702 rc
= ata_wait_ready(ap
, deadline
);
3703 if (rc
&& rc
!= -ENODEV
) {
3704 ata_link_printk(link
, KERN_WARNING
, "device not ready "
3705 "(errno=%d), forcing hardreset\n", rc
);
3706 ehc
->i
.action
|= ATA_EH_HARDRESET
;
3714 * ata_std_softreset - reset host port via ATA SRST
3715 * @link: ATA link to reset
3716 * @classes: resulting classes of attached devices
3717 * @deadline: deadline jiffies for the operation
3719 * Reset host port using ATA SRST.
3722 * Kernel thread context (may sleep)
3725 * 0 on success, -errno otherwise.
3727 int ata_std_softreset(struct ata_link
*link
, unsigned int *classes
,
3728 unsigned long deadline
)
3730 struct ata_port
*ap
= link
->ap
;
3731 unsigned int slave_possible
= ap
->flags
& ATA_FLAG_SLAVE_POSS
;
3732 unsigned int devmask
= 0;
3738 if (ata_link_offline(link
)) {
3739 classes
[0] = ATA_DEV_NONE
;
3743 /* determine if device 0/1 are present */
3744 if (ata_devchk(ap
, 0))
3745 devmask
|= (1 << 0);
3746 if (slave_possible
&& ata_devchk(ap
, 1))
3747 devmask
|= (1 << 1);
3749 /* select device 0 again */
3750 ap
->ops
->dev_select(ap
, 0);
3752 /* issue bus reset */
3753 DPRINTK("about to softreset, devmask=%x\n", devmask
);
3754 rc
= ata_bus_softreset(ap
, devmask
, deadline
);
3755 /* if link is occupied, -ENODEV too is an error */
3756 if (rc
&& (rc
!= -ENODEV
|| sata_scr_valid(link
))) {
3757 ata_link_printk(link
, KERN_ERR
, "SRST failed (errno=%d)\n", rc
);
3761 /* determine by signature whether we have ATA or ATAPI devices */
3762 classes
[0] = ata_dev_try_classify(&link
->device
[0],
3763 devmask
& (1 << 0), &err
);
3764 if (slave_possible
&& err
!= 0x81)
3765 classes
[1] = ata_dev_try_classify(&link
->device
[1],
3766 devmask
& (1 << 1), &err
);
3769 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes
[0], classes
[1]);
3774 * sata_link_hardreset - reset link via SATA phy reset
3775 * @link: link to reset
3776 * @timing: timing parameters { interval, duratinon, timeout } in msec
3777 * @deadline: deadline jiffies for the operation
3779 * SATA phy-reset @link using DET bits of SControl register.
3782 * Kernel thread context (may sleep)
3785 * 0 on success, -errno otherwise.
3787 int sata_link_hardreset(struct ata_link
*link
, const unsigned long *timing
,
3788 unsigned long deadline
)
3795 if (sata_set_spd_needed(link
)) {
3796 /* SATA spec says nothing about how to reconfigure
3797 * spd. To be on the safe side, turn off phy during
3798 * reconfiguration. This works for at least ICH7 AHCI
3801 if ((rc
= sata_scr_read(link
, SCR_CONTROL
, &scontrol
)))
3804 scontrol
= (scontrol
& 0x0f0) | 0x304;
3806 if ((rc
= sata_scr_write(link
, SCR_CONTROL
, scontrol
)))
3812 /* issue phy wake/reset */
3813 if ((rc
= sata_scr_read(link
, SCR_CONTROL
, &scontrol
)))
3816 scontrol
= (scontrol
& 0x0f0) | 0x301;
3818 if ((rc
= sata_scr_write_flush(link
, SCR_CONTROL
, scontrol
)))
3821 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3822 * 10.4.2 says at least 1 ms.
3826 /* bring link back */
3827 rc
= sata_link_resume(link
, timing
, deadline
);
3829 DPRINTK("EXIT, rc=%d\n", rc
);
3834 * sata_std_hardreset - reset host port via SATA phy reset
3835 * @link: link to reset
3836 * @class: resulting class of attached device
3837 * @deadline: deadline jiffies for the operation
3839 * SATA phy-reset host port using DET bits of SControl register,
3840 * wait for !BSY and classify the attached device.
3843 * Kernel thread context (may sleep)
3846 * 0 on success, -errno otherwise.
3848 int sata_std_hardreset(struct ata_link
*link
, unsigned int *class,
3849 unsigned long deadline
)
3851 struct ata_port
*ap
= link
->ap
;
3852 const unsigned long *timing
= sata_ehc_deb_timing(&link
->eh_context
);
3858 rc
= sata_link_hardreset(link
, timing
, deadline
);
3860 ata_link_printk(link
, KERN_ERR
,
3861 "COMRESET failed (errno=%d)\n", rc
);
3865 /* TODO: phy layer with polling, timeouts, etc. */
3866 if (ata_link_offline(link
)) {
3867 *class = ATA_DEV_NONE
;
3868 DPRINTK("EXIT, link offline\n");
3872 /* wait a while before checking status */
3873 ata_wait_after_reset(ap
, deadline
);
3875 /* If PMP is supported, we have to do follow-up SRST. Note
3876 * that some PMPs don't send D2H Reg FIS after hardreset at
3877 * all if the first port is empty. Wait for it just for a
3878 * second and request follow-up SRST.
3880 if (ap
->flags
& ATA_FLAG_PMP
) {
3881 ata_wait_ready(ap
, jiffies
+ HZ
);
3885 rc
= ata_wait_ready(ap
, deadline
);
3886 /* link occupied, -ENODEV too is an error */
3888 ata_link_printk(link
, KERN_ERR
,
3889 "COMRESET failed (errno=%d)\n", rc
);
3893 ap
->ops
->dev_select(ap
, 0); /* probably unnecessary */
3895 *class = ata_dev_try_classify(link
->device
, 1, NULL
);
3897 DPRINTK("EXIT, class=%u\n", *class);
3902 * ata_std_postreset - standard postreset callback
3903 * @link: the target ata_link
3904 * @classes: classes of attached devices
3906 * This function is invoked after a successful reset. Note that
3907 * the device might have been reset more than once using
3908 * different reset methods before postreset is invoked.
3911 * Kernel thread context (may sleep)
3913 void ata_std_postreset(struct ata_link
*link
, unsigned int *classes
)
3915 struct ata_port
*ap
= link
->ap
;
3920 /* print link status */
3921 sata_print_link_status(link
);
3924 if (sata_scr_read(link
, SCR_ERROR
, &serror
) == 0)
3925 sata_scr_write(link
, SCR_ERROR
, serror
);
3927 /* is double-select really necessary? */
3928 if (classes
[0] != ATA_DEV_NONE
)
3929 ap
->ops
->dev_select(ap
, 1);
3930 if (classes
[1] != ATA_DEV_NONE
)
3931 ap
->ops
->dev_select(ap
, 0);
3933 /* bail out if no device is present */
3934 if (classes
[0] == ATA_DEV_NONE
&& classes
[1] == ATA_DEV_NONE
) {
3935 DPRINTK("EXIT, no device\n");
3939 /* set up device control */
3940 if (ap
->ioaddr
.ctl_addr
)
3941 iowrite8(ap
->ctl
, ap
->ioaddr
.ctl_addr
);
3947 * ata_dev_same_device - Determine whether new ID matches configured device
3948 * @dev: device to compare against
3949 * @new_class: class of the new device
3950 * @new_id: IDENTIFY page of the new device
3952 * Compare @new_class and @new_id against @dev and determine
3953 * whether @dev is the device indicated by @new_class and
3960 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3962 static int ata_dev_same_device(struct ata_device
*dev
, unsigned int new_class
,
3965 const u16
*old_id
= dev
->id
;
3966 unsigned char model
[2][ATA_ID_PROD_LEN
+ 1];
3967 unsigned char serial
[2][ATA_ID_SERNO_LEN
+ 1];
3969 if (dev
->class != new_class
) {
3970 ata_dev_printk(dev
, KERN_INFO
, "class mismatch %d != %d\n",
3971 dev
->class, new_class
);
3975 ata_id_c_string(old_id
, model
[0], ATA_ID_PROD
, sizeof(model
[0]));
3976 ata_id_c_string(new_id
, model
[1], ATA_ID_PROD
, sizeof(model
[1]));
3977 ata_id_c_string(old_id
, serial
[0], ATA_ID_SERNO
, sizeof(serial
[0]));
3978 ata_id_c_string(new_id
, serial
[1], ATA_ID_SERNO
, sizeof(serial
[1]));
3980 if (strcmp(model
[0], model
[1])) {
3981 ata_dev_printk(dev
, KERN_INFO
, "model number mismatch "
3982 "'%s' != '%s'\n", model
[0], model
[1]);
3986 if (strcmp(serial
[0], serial
[1])) {
3987 ata_dev_printk(dev
, KERN_INFO
, "serial number mismatch "
3988 "'%s' != '%s'\n", serial
[0], serial
[1]);
3996 * ata_dev_reread_id - Re-read IDENTIFY data
3997 * @dev: target ATA device
3998 * @readid_flags: read ID flags
4000 * Re-read IDENTIFY page and make sure @dev is still attached to
4004 * Kernel thread context (may sleep)
4007 * 0 on success, negative errno otherwise
4009 int ata_dev_reread_id(struct ata_device
*dev
, unsigned int readid_flags
)
4011 unsigned int class = dev
->class;
4012 u16
*id
= (void *)dev
->link
->ap
->sector_buf
;
4016 rc
= ata_dev_read_id(dev
, &class, readid_flags
, id
);
4020 /* is the device still there? */
4021 if (!ata_dev_same_device(dev
, class, id
))
4024 memcpy(dev
->id
, id
, sizeof(id
[0]) * ATA_ID_WORDS
);
4029 * ata_dev_revalidate - Revalidate ATA device
4030 * @dev: device to revalidate
4031 * @new_class: new class code
4032 * @readid_flags: read ID flags
4034 * Re-read IDENTIFY page, make sure @dev is still attached to the
4035 * port and reconfigure it according to the new IDENTIFY page.
4038 * Kernel thread context (may sleep)
4041 * 0 on success, negative errno otherwise
4043 int ata_dev_revalidate(struct ata_device
*dev
, unsigned int new_class
,
4044 unsigned int readid_flags
)
4046 u64 n_sectors
= dev
->n_sectors
;
4049 if (!ata_dev_enabled(dev
))
4052 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4053 if (ata_class_enabled(new_class
) &&
4054 new_class
!= ATA_DEV_ATA
&& new_class
!= ATA_DEV_ATAPI
) {
4055 ata_dev_printk(dev
, KERN_INFO
, "class mismatch %u != %u\n",
4056 dev
->class, new_class
);
4062 rc
= ata_dev_reread_id(dev
, readid_flags
);
4066 /* configure device according to the new ID */
4067 rc
= ata_dev_configure(dev
);
4071 /* verify n_sectors hasn't changed */
4072 if (dev
->class == ATA_DEV_ATA
&& n_sectors
&&
4073 dev
->n_sectors
!= n_sectors
) {
4074 ata_dev_printk(dev
, KERN_INFO
, "n_sectors mismatch "
4076 (unsigned long long)n_sectors
,
4077 (unsigned long long)dev
->n_sectors
);
4079 /* restore original n_sectors */
4080 dev
->n_sectors
= n_sectors
;
4089 ata_dev_printk(dev
, KERN_ERR
, "revalidation failed (errno=%d)\n", rc
);
4093 struct ata_blacklist_entry
{
4094 const char *model_num
;
4095 const char *model_rev
;
4096 unsigned long horkage
;
4099 static const struct ata_blacklist_entry ata_device_blacklist
[] = {
4100 /* Devices with DMA related problems under Linux */
4101 { "WDC AC11000H", NULL
, ATA_HORKAGE_NODMA
},
4102 { "WDC AC22100H", NULL
, ATA_HORKAGE_NODMA
},
4103 { "WDC AC32500H", NULL
, ATA_HORKAGE_NODMA
},
4104 { "WDC AC33100H", NULL
, ATA_HORKAGE_NODMA
},
4105 { "WDC AC31600H", NULL
, ATA_HORKAGE_NODMA
},
4106 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA
},
4107 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA
},
4108 { "Compaq CRD-8241B", NULL
, ATA_HORKAGE_NODMA
},
4109 { "CRD-8400B", NULL
, ATA_HORKAGE_NODMA
},
4110 { "CRD-8480B", NULL
, ATA_HORKAGE_NODMA
},
4111 { "CRD-8482B", NULL
, ATA_HORKAGE_NODMA
},
4112 { "CRD-84", NULL
, ATA_HORKAGE_NODMA
},
4113 { "SanDisk SDP3B", NULL
, ATA_HORKAGE_NODMA
},
4114 { "SanDisk SDP3B-64", NULL
, ATA_HORKAGE_NODMA
},
4115 { "SANYO CD-ROM CRD", NULL
, ATA_HORKAGE_NODMA
},
4116 { "HITACHI CDR-8", NULL
, ATA_HORKAGE_NODMA
},
4117 { "HITACHI CDR-8335", NULL
, ATA_HORKAGE_NODMA
},
4118 { "HITACHI CDR-8435", NULL
, ATA_HORKAGE_NODMA
},
4119 { "Toshiba CD-ROM XM-6202B", NULL
, ATA_HORKAGE_NODMA
},
4120 { "TOSHIBA CD-ROM XM-1702BC", NULL
, ATA_HORKAGE_NODMA
},
4121 { "CD-532E-A", NULL
, ATA_HORKAGE_NODMA
},
4122 { "E-IDE CD-ROM CR-840",NULL
, ATA_HORKAGE_NODMA
},
4123 { "CD-ROM Drive/F5A", NULL
, ATA_HORKAGE_NODMA
},
4124 { "WPI CDD-820", NULL
, ATA_HORKAGE_NODMA
},
4125 { "SAMSUNG CD-ROM SC-148C", NULL
, ATA_HORKAGE_NODMA
},
4126 { "SAMSUNG CD-ROM SC", NULL
, ATA_HORKAGE_NODMA
},
4127 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL
,ATA_HORKAGE_NODMA
},
4128 { "_NEC DV5800A", NULL
, ATA_HORKAGE_NODMA
},
4129 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA
},
4130 { "Seagate STT20000A", NULL
, ATA_HORKAGE_NODMA
},
4131 /* Odd clown on sil3726/4726 PMPs */
4132 { "Config Disk", NULL
, ATA_HORKAGE_NODMA
|
4133 ATA_HORKAGE_SKIP_PM
},
4135 /* Weird ATAPI devices */
4136 { "TORiSAN DVD-ROM DRD-N216", NULL
, ATA_HORKAGE_MAX_SEC_128
},
4138 /* Devices we expect to fail diagnostics */
4140 /* Devices where NCQ should be avoided */
4142 { "WDC WD740ADFD-00", NULL
, ATA_HORKAGE_NONCQ
},
4143 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4144 { "FUJITSU MHT2060BH", NULL
, ATA_HORKAGE_NONCQ
},
4146 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ
},
4147 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ
},
4148 { "HITACHI HDS7250SASUN500G*", NULL
, ATA_HORKAGE_NONCQ
},
4149 { "HITACHI HDS7225SBSUN250G*", NULL
, ATA_HORKAGE_NONCQ
},
4150 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ
},
4152 /* Blacklist entries taken from Silicon Image 3124/3132
4153 Windows driver .inf file - also several Linux problem reports */
4154 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ
, },
4155 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ
, },
4156 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ
, },
4157 /* Drives which do spurious command completion */
4158 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ
, },
4159 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ
, },
4160 { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ
, },
4161 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ
, },
4162 { "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ
, },
4163 { "WDC WD740ADFD-00NLR1", NULL
, ATA_HORKAGE_NONCQ
, },
4164 { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ
, },
4165 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ
, },
4166 { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ
, },
4167 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ
, },
4168 { "ST9160821AS", "3.ALD", ATA_HORKAGE_NONCQ
, },
4169 { "ST9160821AS", "3.CCD", ATA_HORKAGE_NONCQ
, },
4170 { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ
, },
4171 { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ
, },
4172 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ
, },
4173 { "Maxtor 7V300F0", "VA111900", ATA_HORKAGE_NONCQ
, },
4175 /* devices which puke on READ_NATIVE_MAX */
4176 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA
, },
4177 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA
},
4178 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA
},
4179 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA
},
4181 /* Devices which report 1 sector over size HPA */
4182 { "ST340823A", NULL
, ATA_HORKAGE_HPA_SIZE
, },
4183 { "ST320413A", NULL
, ATA_HORKAGE_HPA_SIZE
, },
4185 /* Devices which get the IVB wrong */
4186 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB
, },
4187 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB
, },
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;
	int len;

	/*
	 * check for trailing wildcard: *\0
	 */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		len = p - patt;
	else {
		len = strlen(name);
		if (!len) {
			if (!*patt)
				return 0;
			return -1;
		}
	}

	return strncmp(patt, name, len);
}
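
/*
 * Illustrative sketch of how the matcher above behaves (device strings are
 * made up for the example):
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 6L080L4", '*') == 0
 *		trailing wildcard: only the "Maxtor " prefix is compared
 *	strn_pattern_cmp("ST380817AS", "ST380817AS", '*') == 0
 *		no wildcard: the whole name must match
 *	strn_pattern_cmp("Maxtor *", "WDC WD740ADFD", '*') != 0
 *
 * The blacklist scan below treats a return value of 0 as a match.
 */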
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
/**
 *	ata_is_40wire		-	check drive side detection
 *	@dev: device
 *
 *	Perform drive side detection decoding, allowing for device vendors
 *	who can't follow the documentation.
 */
static int ata_is_40wire(struct ata_device *dev)
{
	if (dev->horkage & ATA_HORKAGE_IVB)
		return ata_drive_40wire_relaxed(dev->id);
	return ata_drive_40wire(dev->id);
}
4265 * ata_dev_xfermask - Compute supported xfermask of the given device
4266 * @dev: Device to compute xfermask for
4268 * Compute supported xfermask of @dev and store it in
4269 * dev->*_mask. This function is responsible for applying all
4270 * known limits including host controller limits, device
4276 static void ata_dev_xfermask(struct ata_device
*dev
)
4278 struct ata_link
*link
= dev
->link
;
4279 struct ata_port
*ap
= link
->ap
;
4280 struct ata_host
*host
= ap
->host
;
4281 unsigned long xfer_mask
;
4283 /* controller modes available */
4284 xfer_mask
= ata_pack_xfermask(ap
->pio_mask
,
4285 ap
->mwdma_mask
, ap
->udma_mask
);
4287 /* drive modes available */
4288 xfer_mask
&= ata_pack_xfermask(dev
->pio_mask
,
4289 dev
->mwdma_mask
, dev
->udma_mask
);
4290 xfer_mask
&= ata_id_xfermask(dev
->id
);
4293 * CFA Advanced TrueIDE timings are not allowed on a shared
4296 if (ata_dev_pair(dev
)) {
4297 /* No PIO5 or PIO6 */
4298 xfer_mask
&= ~(0x03 << (ATA_SHIFT_PIO
+ 5));
4299 /* No MWDMA3 or MWDMA 4 */
4300 xfer_mask
&= ~(0x03 << (ATA_SHIFT_MWDMA
+ 3));
4303 if (ata_dma_blacklisted(dev
)) {
4304 xfer_mask
&= ~(ATA_MASK_MWDMA
| ATA_MASK_UDMA
);
4305 ata_dev_printk(dev
, KERN_WARNING
,
4306 "device is on DMA blacklist, disabling DMA\n");
4309 if ((host
->flags
& ATA_HOST_SIMPLEX
) &&
4310 host
->simplex_claimed
&& host
->simplex_claimed
!= ap
) {
4311 xfer_mask
&= ~(ATA_MASK_MWDMA
| ATA_MASK_UDMA
);
4312 ata_dev_printk(dev
, KERN_WARNING
, "simplex DMA is claimed by "
4313 "other device, disabling DMA\n");
4316 if (ap
->flags
& ATA_FLAG_NO_IORDY
)
4317 xfer_mask
&= ata_pio_mask_no_iordy(dev
);
4319 if (ap
->ops
->mode_filter
)
4320 xfer_mask
= ap
->ops
->mode_filter(dev
, xfer_mask
);
4322 /* Apply cable rule here. Don't apply it early because when
4323 * we handle hot plug the cable type can itself change.
4324 * Check this last so that we know if the transfer rate was
4325 * solely limited by the cable.
4326 * Unknown or 80 wire cables reported host side are checked
4327 * drive side as well. Cases where we know a 40wire cable
4328 * is used safely for 80 are not checked here.
4330 if (xfer_mask
& (0xF8 << ATA_SHIFT_UDMA
))
4331 /* UDMA/44 or higher would be available */
4332 if ((ap
->cbl
== ATA_CBL_PATA40
) ||
4333 (ata_is_40wire(dev
) &&
4334 (ap
->cbl
== ATA_CBL_PATA_UNK
||
4335 ap
->cbl
== ATA_CBL_PATA80
))) {
4336 ata_dev_printk(dev
, KERN_WARNING
,
4337 "limited to UDMA/33 due to 40-wire cable\n");
4338 xfer_mask
&= ~(0xF8 << ATA_SHIFT_UDMA
);
4341 ata_unpack_xfermask(xfer_mask
, &dev
->pio_mask
,
4342 &dev
->mwdma_mask
, &dev
->udma_mask
);
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count set to indicate @feature.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4463 * ata_sg_clean - Unmap DMA memory associated with command
4464 * @qc: Command containing DMA memory to be released
4466 * Unmap all mapped DMA memory associated with this command.
4469 * spin_lock_irqsave(host lock)
4471 void ata_sg_clean(struct ata_queued_cmd
*qc
)
4473 struct ata_port
*ap
= qc
->ap
;
4474 struct scatterlist
*sg
= qc
->__sg
;
4475 int dir
= qc
->dma_dir
;
4476 void *pad_buf
= NULL
;
4478 WARN_ON(!(qc
->flags
& ATA_QCFLAG_DMAMAP
));
4479 WARN_ON(sg
== NULL
);
4481 if (qc
->flags
& ATA_QCFLAG_SINGLE
)
4482 WARN_ON(qc
->n_elem
> 1);
4484 VPRINTK("unmapping %u sg elements\n", qc
->n_elem
);
4486 /* if we padded the buffer out to 32-bit bound, and data
4487 * xfer direction is from-device, we must copy from the
4488 * pad buffer back into the supplied buffer
4490 if (qc
->pad_len
&& !(qc
->tf
.flags
& ATA_TFLAG_WRITE
))
4491 pad_buf
= ap
->pad
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
4493 if (qc
->flags
& ATA_QCFLAG_SG
) {
4495 dma_unmap_sg(ap
->dev
, sg
, qc
->n_elem
, dir
);
4496 /* restore last sg */
4497 sg_last(sg
, qc
->orig_n_elem
)->length
+= qc
->pad_len
;
4499 struct scatterlist
*psg
= &qc
->pad_sgent
;
4500 void *addr
= kmap_atomic(sg_page(psg
), KM_IRQ0
);
4501 memcpy(addr
+ psg
->offset
, pad_buf
, qc
->pad_len
);
4502 kunmap_atomic(addr
, KM_IRQ0
);
4506 dma_unmap_single(ap
->dev
,
4507 sg_dma_address(&sg
[0]), sg_dma_len(&sg
[0]),
4510 sg
->length
+= qc
->pad_len
;
4512 memcpy(qc
->buf_virt
+ sg
->length
- qc
->pad_len
,
4513 pad_buf
, qc
->pad_len
);
4516 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
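
/*
 * Worked example for the 64K boundary handling above (addresses are made
 * up): an S/G element with addr == 0x1234f000 and sg_len == 0x3000 has
 * offset == 0xf000, so the first PRD entry covers 0x1000 bytes up to the
 * 64K boundary and a second entry covers the remaining 0x2000 bytes at
 * 0x12350000.  ATA_PRD_EOT is then OR'd into the last entry only.
 */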
4573 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4574 * @qc: Metadata associated with taskfile to be transferred
4576 * Fill PCI IDE PRD (scatter-gather) table with segments
4577 * associated with the current disk command. Perform the fill
4578 * so that we avoid writing any length 64K records for
4579 * controllers that don't follow the spec.
4582 * spin_lock_irqsave(host lock)
4585 static void ata_fill_sg_dumb(struct ata_queued_cmd
*qc
)
4587 struct ata_port
*ap
= qc
->ap
;
4588 struct scatterlist
*sg
;
4591 WARN_ON(qc
->__sg
== NULL
);
4592 WARN_ON(qc
->n_elem
== 0 && qc
->pad_len
== 0);
4595 ata_for_each_sg(sg
, qc
) {
4597 u32 sg_len
, len
, blen
;
4599 /* determine if physical DMA addr spans 64K boundary.
4600 * Note h/w doesn't support 64-bit, so we unconditionally
4601 * truncate dma_addr_t to u32.
4603 addr
= (u32
) sg_dma_address(sg
);
4604 sg_len
= sg_dma_len(sg
);
4607 offset
= addr
& 0xffff;
4609 if ((offset
+ sg_len
) > 0x10000)
4610 len
= 0x10000 - offset
;
4612 blen
= len
& 0xffff;
4613 ap
->prd
[idx
].addr
= cpu_to_le32(addr
);
4615 /* Some PATA chipsets like the CS5530 can't
4616 cope with 0x0000 meaning 64K as the spec says */
4617 ap
->prd
[idx
].flags_len
= cpu_to_le32(0x8000);
4619 ap
->prd
[++idx
].addr
= cpu_to_le32(addr
+ 0x8000);
4621 ap
->prd
[idx
].flags_len
= cpu_to_le32(blen
);
4622 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx
, addr
, len
);
4631 ap
->prd
[idx
- 1].flags_len
|= cpu_to_le32(ATA_PRD_EOT
);
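/*
 * Illustrative sketch (not part of the driver): both PRD fill routines above
 * clip each scatter-gather segment so a single PRD entry never crosses a 64K
 * boundary.  Hypothetical numbers: a segment starting at offset 0xFF00 within
 * a 64K window with 0x0400 bytes remaining is clipped to 0x0100 bytes for
 * this PRD entry; the remainder goes into the next entry.
 */
#if 0
static u32 example_clip_to_64k(u32 addr, u32 sg_len)
{
	u32 offset = addr & 0xffff;
	u32 len = sg_len;

	if ((offset + sg_len) > 0x10000)
		len = 0x10000 - offset;		/* stop at the 64K boundary */
	return len;
}
#endif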
/**
 *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (unlikely(qc->nbytes & 15))
		return 1;

	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}
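/*
 * Illustrative sketch (hypothetical LLD hook, not part of this file): a
 * low-level driver can veto ATAPI DMA per command by supplying
 * ->check_atapi_dma in its ata_port_operations; a nonzero return makes
 * libata fall back to PIO for that PACKET command.  The policy shown is
 * made up for illustration.
 */
#if 0
static int example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* hypothetical policy: use PIO for short transfers */
	if (qc->nbytes < 64)
		return 1;	/* deny DMA */
	return 0;		/* DMA is fine */
}
#endif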
/**
 *	ata_std_qc_defer - Check whether a qc needs to be deferred
 *	@qc: ATA command in question
 *
 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As upper layer only knows the queue depth, we are
 *	responsible for maintaining exclusion.  This function checks
 *	whether a new command @qc can be issued.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}
/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}

/**
 *	ata_dumb_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg_dumb(qc);
}

void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
/**
 *	ata_sg_init_one - Associate command with memory buffer
 *	@qc: Command to be associated
 *	@buf: Memory buffer
 *	@buflen: Length of memory buffer, in bytes.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a single memory buffer, @buf of byte length @buflen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	qc->flags |= ATA_QCFLAG_SINGLE;

	qc->__sg = &qc->sgent;
	qc->n_elem = 1;
	qc->orig_n_elem = 1;
	qc->buf_virt = buf;
	qc->nbytes = buflen;
	qc->cursg = qc->__sg;

	sg_init_one(&qc->sgent, buf, buflen);
}

/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->flags |= ATA_QCFLAG_SG;
	qc->__sg = sg;
	qc->n_elem = n_elem;
	qc->orig_n_elem = n_elem;
	qc->cursg = qc->__sg;
}
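/*
 * Illustrative sketch (hypothetical usage, not part of this file): a caller
 * that wants a command to move one flat buffer attaches it with
 * ata_sg_init_one(); scatter-gather callers use ata_sg_init() instead.
 * The helper name and DMA direction below are made up for illustration.
 */
#if 0
static void example_attach_buffer(struct ata_queued_cmd *qc,
				  void *buf, unsigned int buflen)
{
	ata_sg_init_one(qc, buf, buflen);	/* single segment */
	qc->dma_dir = DMA_FROM_DEVICE;		/* direction chosen by caller */
}
#endif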
4782 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4783 * @qc: Command with memory buffer to be mapped.
4785 * DMA-map the memory buffer associated with queued_cmd @qc.
4788 * spin_lock_irqsave(host lock)
4791 * Zero on success, negative on error.
4794 static int ata_sg_setup_one(struct ata_queued_cmd
*qc
)
4796 struct ata_port
*ap
= qc
->ap
;
4797 int dir
= qc
->dma_dir
;
4798 struct scatterlist
*sg
= qc
->__sg
;
4799 dma_addr_t dma_address
;
4802 /* we must lengthen transfers to end on a 32-bit boundary */
4803 qc
->pad_len
= sg
->length
& 3;
4805 void *pad_buf
= ap
->pad
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
4806 struct scatterlist
*psg
= &qc
->pad_sgent
;
4808 WARN_ON(qc
->dev
->class != ATA_DEV_ATAPI
);
4810 memset(pad_buf
, 0, ATA_DMA_PAD_SZ
);
4812 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
)
4813 memcpy(pad_buf
, qc
->buf_virt
+ sg
->length
- qc
->pad_len
,
4816 sg_dma_address(psg
) = ap
->pad_dma
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
4817 sg_dma_len(psg
) = ATA_DMA_PAD_SZ
;
4819 sg
->length
-= qc
->pad_len
;
4820 if (sg
->length
== 0)
4823 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4824 sg
->length
, qc
->pad_len
);
4832 dma_address
= dma_map_single(ap
->dev
, qc
->buf_virt
,
4834 if (dma_mapping_error(dma_address
)) {
4836 sg
->length
+= qc
->pad_len
;
4840 sg_dma_address(sg
) = dma_address
;
4841 sg_dma_len(sg
) = sg
->length
;
4844 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg
),
4845 qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
4851 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4852 * @qc: Command with scatter-gather table to be mapped.
4854 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4857 * spin_lock_irqsave(host lock)
4860 * Zero on success, negative on error.
4864 static int ata_sg_setup(struct ata_queued_cmd
*qc
)
4866 struct ata_port
*ap
= qc
->ap
;
4867 struct scatterlist
*sg
= qc
->__sg
;
4868 struct scatterlist
*lsg
= sg_last(qc
->__sg
, qc
->n_elem
);
4869 int n_elem
, pre_n_elem
, dir
, trim_sg
= 0;
4871 VPRINTK("ENTER, ata%u\n", ap
->print_id
);
4872 WARN_ON(!(qc
->flags
& ATA_QCFLAG_SG
));
4874 /* we must lengthen transfers to end on a 32-bit boundary */
4875 qc
->pad_len
= lsg
->length
& 3;
4877 void *pad_buf
= ap
->pad
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
4878 struct scatterlist
*psg
= &qc
->pad_sgent
;
4879 unsigned int offset
;
4881 WARN_ON(qc
->dev
->class != ATA_DEV_ATAPI
);
4883 memset(pad_buf
, 0, ATA_DMA_PAD_SZ
);
4886 * psg->page/offset are used to copy to-be-written
4887 * data in this function or read data in ata_sg_clean.
4889 offset
= lsg
->offset
+ lsg
->length
- qc
->pad_len
;
4890 sg_init_table(psg
, 1);
4891 sg_set_page(psg
, nth_page(sg_page(lsg
), offset
>> PAGE_SHIFT
),
4892 qc
->pad_len
, offset_in_page(offset
));
4894 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
) {
4895 void *addr
= kmap_atomic(sg_page(psg
), KM_IRQ0
);
4896 memcpy(pad_buf
, addr
+ psg
->offset
, qc
->pad_len
);
4897 kunmap_atomic(addr
, KM_IRQ0
);
4900 sg_dma_address(psg
) = ap
->pad_dma
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
4901 sg_dma_len(psg
) = ATA_DMA_PAD_SZ
;
4903 lsg
->length
-= qc
->pad_len
;
4904 if (lsg
->length
== 0)
4907 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4908 qc
->n_elem
- 1, lsg
->length
, qc
->pad_len
);
4911 pre_n_elem
= qc
->n_elem
;
4912 if (trim_sg
&& pre_n_elem
)
4921 n_elem
= dma_map_sg(ap
->dev
, sg
, pre_n_elem
, dir
);
4923 /* restore last sg */
4924 lsg
->length
+= qc
->pad_len
;
4928 DPRINTK("%d sg elements mapped\n", n_elem
);
4931 qc
->n_elem
= n_elem
;
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf:  Buffer to swap
 *	@buf_words:  Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
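/*
 * Illustrative sketch (not part of this file): IDENTIFY DEVICE data arrives
 * as 256 little-endian 16-bit words, so it is converted in place before the
 * id[] words are interpreted; on little-endian CPUs the call compiles to a
 * no-op.  The helper name below is hypothetical.
 */
#if 0
static void example_fix_id_endianness(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);	/* 256 words of IDENTIFY data */
}
#endif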
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}

/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;

	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
5021 * ata_pio_sector - Transfer a sector of data.
5022 * @qc: Command on going
5024 * Transfer qc->sect_size bytes of data from/to the ATA device.
5027 * Inherited from caller.
5030 static void ata_pio_sector(struct ata_queued_cmd
*qc
)
5032 int do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
5033 struct ata_port
*ap
= qc
->ap
;
5035 unsigned int offset
;
5038 if (qc
->curbytes
== qc
->nbytes
- qc
->sect_size
)
5039 ap
->hsm_task_state
= HSM_ST_LAST
;
5041 page
= sg_page(qc
->cursg
);
5042 offset
= qc
->cursg
->offset
+ qc
->cursg_ofs
;
5044 /* get the current page and offset */
5045 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
5046 offset
%= PAGE_SIZE
;
5048 DPRINTK("data %s\n", qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
5050 if (PageHighMem(page
)) {
5051 unsigned long flags
;
5053 /* FIXME: use a bounce buffer */
5054 local_irq_save(flags
);
5055 buf
= kmap_atomic(page
, KM_IRQ0
);
5057 /* do the actual data transfer */
5058 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, qc
->sect_size
, do_write
);
5060 kunmap_atomic(buf
, KM_IRQ0
);
5061 local_irq_restore(flags
);
5063 buf
= page_address(page
);
5064 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, qc
->sect_size
, do_write
);
5067 qc
->curbytes
+= qc
->sect_size
;
5068 qc
->cursg_ofs
+= qc
->sect_size
;
5070 if (qc
->cursg_ofs
== qc
->cursg
->length
) {
5071 qc
->cursg
= sg_next(qc
->cursg
);
5077 * ata_pio_sectors - Transfer one or many sectors.
5078 * @qc: Command on going
5080 * Transfer one or many sectors of data from/to the
5081 * ATA device for the DRQ request.
5084 * Inherited from caller.
5087 static void ata_pio_sectors(struct ata_queued_cmd
*qc
)
5089 if (is_multi_taskfile(&qc
->tf
)) {
5090 /* READ/WRITE MULTIPLE */
5093 WARN_ON(qc
->dev
->multi_count
== 0);
5095 nsect
= min((qc
->nbytes
- qc
->curbytes
) / qc
->sect_size
,
5096 qc
->dev
->multi_count
);
5102 ata_altstatus(qc
->ap
); /* flush */
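/*
 * Illustrative sketch (not part of this file): for READ/WRITE MULTIPLE the
 * number of sectors moved per DRQ block is bounded by the device's
 * multi_count, as computed in ata_pio_sectors() above.  Hypothetical
 * numbers: 24 KiB left, 512-byte sectors, multi_count 16 -> 16 sectors this
 * block, 32 sectors remaining for later blocks.
 */
#if 0
static unsigned int example_drq_block_sectors(unsigned int bytes_left,
					      unsigned int sect_size,
					      unsigned int multi_count)
{
	return min(bytes_left / sect_size, multi_count);
}
#endif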
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
5142 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5143 * @qc: Command on going
5144 * @bytes: number of bytes
5146 * Transfer Transfer data from/to the ATAPI device.
5149 * Inherited from caller.
5153 static void __atapi_pio_bytes(struct ata_queued_cmd
*qc
, unsigned int bytes
)
5155 int do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
5156 struct scatterlist
*sg
= qc
->__sg
;
5157 struct scatterlist
*lsg
= sg_last(qc
->__sg
, qc
->n_elem
);
5158 struct ata_port
*ap
= qc
->ap
;
5161 unsigned int offset
, count
;
5164 if (qc
->curbytes
+ bytes
>= qc
->nbytes
)
5165 ap
->hsm_task_state
= HSM_ST_LAST
;
5168 if (unlikely(no_more_sg
)) {
5170 * The end of qc->sg is reached and the device expects
5171 * more data to transfer. In order not to overrun qc->sg
5172 * and fulfill length specified in the byte count register,
5173 * - for read case, discard trailing data from the device
5174 * - for write case, padding zero data to the device
5176 u16 pad_buf
[1] = { 0 };
5177 unsigned int words
= bytes
>> 1;
5180 if (words
) /* warning if bytes > 1 */
5181 ata_dev_printk(qc
->dev
, KERN_WARNING
,
5182 "%u bytes trailing data\n", bytes
);
5184 for (i
= 0; i
< words
; i
++)
5185 ap
->ops
->data_xfer(qc
->dev
, (unsigned char *)pad_buf
, 2, do_write
);
5187 ap
->hsm_task_state
= HSM_ST_LAST
;
5194 offset
= sg
->offset
+ qc
->cursg_ofs
;
5196 /* get the current page and offset */
5197 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
5198 offset
%= PAGE_SIZE
;
5200 /* don't overrun current sg */
5201 count
= min(sg
->length
- qc
->cursg_ofs
, bytes
);
5203 /* don't cross page boundaries */
5204 count
= min(count
, (unsigned int)PAGE_SIZE
- offset
);
5206 DPRINTK("data %s\n", qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
5208 if (PageHighMem(page
)) {
5209 unsigned long flags
;
5211 /* FIXME: use bounce buffer */
5212 local_irq_save(flags
);
5213 buf
= kmap_atomic(page
, KM_IRQ0
);
5215 /* do the actual data transfer */
5216 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, count
, do_write
);
5218 kunmap_atomic(buf
, KM_IRQ0
);
5219 local_irq_restore(flags
);
5221 buf
= page_address(page
);
5222 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, count
, do_write
);
5226 qc
->curbytes
+= count
;
5227 qc
->cursg_ofs
+= count
;
5229 if (qc
->cursg_ofs
== sg
->length
) {
5230 if (qc
->cursg
== lsg
)
5233 qc
->cursg
= sg_next(qc
->cursg
);
5242 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5243 * @qc: Command on going
5245 * Transfer Transfer data from/to the ATAPI device.
5248 * Inherited from caller.
5251 static void atapi_pio_bytes(struct ata_queued_cmd
*qc
)
5253 struct ata_port
*ap
= qc
->ap
;
5254 struct ata_device
*dev
= qc
->dev
;
5255 unsigned int ireason
, bc_lo
, bc_hi
, bytes
;
5256 int i_write
, do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
) ? 1 : 0;
5258 /* Abuse qc->result_tf for temp storage of intermediate TF
5259 * here to save some kernel stack usage.
5260 * For normal completion, qc->result_tf is not relevant. For
5261 * error, qc->result_tf is later overwritten by ata_qc_complete().
5262 * So, the correctness of qc->result_tf is not affected.
5264 ap
->ops
->tf_read(ap
, &qc
->result_tf
);
5265 ireason
= qc
->result_tf
.nsect
;
5266 bc_lo
= qc
->result_tf
.lbam
;
5267 bc_hi
= qc
->result_tf
.lbah
;
5268 bytes
= (bc_hi
<< 8) | bc_lo
;
5270 /* shall be cleared to zero, indicating xfer of data */
5271 if (ireason
& (1 << 0))
5274 /* make sure transfer direction matches expected */
5275 i_write
= ((ireason
& (1 << 1)) == 0) ? 1 : 0;
5276 if (do_write
!= i_write
)
5279 VPRINTK("ata%u: xfering %d bytes\n", ap
->print_id
, bytes
);
5281 __atapi_pio_bytes(qc
, bytes
);
5282 ata_altstatus(ap
); /* flush */
5287 ata_dev_printk(dev
, KERN_INFO
, "ATAPI check failed\n");
5288 qc
->err_mask
|= AC_ERR_HSM
;
5289 ap
->hsm_task_state
= HSM_ST_ERR
;
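/*
 * Illustrative sketch (not part of this file): in the ATAPI PIO protocol the
 * device reports the transfer phase in the interrupt reason (NSECT) register
 * and the byte count in LBAM/LBAH, which atapi_pio_bytes() reads back via
 * qc->result_tf.  Bit 0 (CoD) must be clear for a data phase; bit 1 (IO)
 * gives the direction.  The helper below is hypothetical.
 */
#if 0
static void example_decode_ireason(u8 ireason, u8 lbam, u8 lbah)
{
	int is_data = !(ireason & (1 << 0));	/* CoD clear -> data phase */
	int to_host =  (ireason & (1 << 1));	/* IO set -> device to host */
	unsigned int byte_count = (lbah << 8) | lbam;

	(void)is_data; (void)to_host; (void)byte_count;
}
#endif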
5293 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5294 * @ap: the target ata_port
5298 * 1 if ok in workqueue, 0 otherwise.
5301 static inline int ata_hsm_ok_in_wq(struct ata_port
*ap
, struct ata_queued_cmd
*qc
)
5303 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
5306 if (ap
->hsm_task_state
== HSM_ST_FIRST
) {
5307 if (qc
->tf
.protocol
== ATA_PROT_PIO
&&
5308 (qc
->tf
.flags
& ATA_TFLAG_WRITE
))
5311 if (is_atapi_taskfile(&qc
->tf
) &&
5312 !(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
))
5320 * ata_hsm_qc_complete - finish a qc running on standard HSM
5321 * @qc: Command to complete
5322 * @in_wq: 1 if called from workqueue, 0 otherwise
5324 * Finish @qc which is running on standard HSM.
5327 * If @in_wq is zero, spin_lock_irqsave(host lock).
5328 * Otherwise, none on entry and grabs host lock.
5330 static void ata_hsm_qc_complete(struct ata_queued_cmd
*qc
, int in_wq
)
5332 struct ata_port
*ap
= qc
->ap
;
5333 unsigned long flags
;
5335 if (ap
->ops
->error_handler
) {
5337 spin_lock_irqsave(ap
->lock
, flags
);
5339 /* EH might have kicked in while host lock is
5342 qc
= ata_qc_from_tag(ap
, qc
->tag
);
5344 if (likely(!(qc
->err_mask
& AC_ERR_HSM
))) {
5345 ap
->ops
->irq_on(ap
);
5346 ata_qc_complete(qc
);
5348 ata_port_freeze(ap
);
5351 spin_unlock_irqrestore(ap
->lock
, flags
);
5353 if (likely(!(qc
->err_mask
& AC_ERR_HSM
)))
5354 ata_qc_complete(qc
);
5356 ata_port_freeze(ap
);
5360 spin_lock_irqsave(ap
->lock
, flags
);
5361 ap
->ops
->irq_on(ap
);
5362 ata_qc_complete(qc
);
5363 spin_unlock_irqrestore(ap
->lock
, flags
);
5365 ata_qc_complete(qc
);
5370 * ata_hsm_move - move the HSM to the next state.
5371 * @ap: the target ata_port
5373 * @status: current device status
5374 * @in_wq: 1 if called from workqueue, 0 otherwise
5377 * 1 when poll next status needed, 0 otherwise.
5379 int ata_hsm_move(struct ata_port
*ap
, struct ata_queued_cmd
*qc
,
5380 u8 status
, int in_wq
)
5382 unsigned long flags
= 0;
5385 WARN_ON((qc
->flags
& ATA_QCFLAG_ACTIVE
) == 0);
5387 /* Make sure ata_qc_issue_prot() does not throw things
5388 * like DMA polling into the workqueue. Notice that
5389 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5391 WARN_ON(in_wq
!= ata_hsm_ok_in_wq(ap
, qc
));
5394 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
5395 ap
->print_id
, qc
->tf
.protocol
, ap
->hsm_task_state
, status
);
5397 switch (ap
->hsm_task_state
) {
5399 /* Send first data block or PACKET CDB */
5401 /* If polling, we will stay in the work queue after
5402 * sending the data. Otherwise, interrupt handler
5403 * takes over after sending the data.
5405 poll_next
= (qc
->tf
.flags
& ATA_TFLAG_POLLING
);
5407 /* check device status */
5408 if (unlikely((status
& ATA_DRQ
) == 0)) {
5409 /* handle BSY=0, DRQ=0 as error */
5410 if (likely(status
& (ATA_ERR
| ATA_DF
)))
5411 /* device stops HSM for abort/error */
5412 qc
->err_mask
|= AC_ERR_DEV
;
5414 /* HSM violation. Let EH handle this */
5415 qc
->err_mask
|= AC_ERR_HSM
;
5417 ap
->hsm_task_state
= HSM_ST_ERR
;
5421 /* Device should not ask for data transfer (DRQ=1)
5422 * when it finds something wrong.
5423 * We ignore DRQ here and stop the HSM by
5424 * changing hsm_task_state to HSM_ST_ERR and
5425 * let the EH abort the command or reset the device.
5427 if (unlikely(status
& (ATA_ERR
| ATA_DF
))) {
5428 /* Some ATAPI tape drives forget to clear the ERR bit
5429 * when doing the next command (mostly request sense).
5430 * We ignore ERR here to workaround and proceed sending
5433 if (!(qc
->dev
->horkage
& ATA_HORKAGE_STUCK_ERR
)) {
5434 ata_port_printk(ap
, KERN_WARNING
,
5435 "DRQ=1 with device error, "
5436 "dev_stat 0x%X\n", status
);
5437 qc
->err_mask
|= AC_ERR_HSM
;
5438 ap
->hsm_task_state
= HSM_ST_ERR
;
5443 /* Send the CDB (atapi) or the first data block (ata pio out).
5444 * During the state transition, interrupt handler shouldn't
5445 * be invoked before the data transfer is complete and
5446 * hsm_task_state is changed. Hence, the following locking.
5449 spin_lock_irqsave(ap
->lock
, flags
);
5451 if (qc
->tf
.protocol
== ATA_PROT_PIO
) {
5452 /* PIO data out protocol.
5453 * send first data block.
5456 /* ata_pio_sectors() might change the state
5457 * to HSM_ST_LAST. so, the state is changed here
5458 * before ata_pio_sectors().
5460 ap
->hsm_task_state
= HSM_ST
;
5461 ata_pio_sectors(qc
);
5464 atapi_send_cdb(ap
, qc
);
5467 spin_unlock_irqrestore(ap
->lock
, flags
);
5469 /* if polling, ata_pio_task() handles the rest.
5470 * otherwise, interrupt handler takes over from here.
5475 /* complete command or read/write the data register */
5476 if (qc
->tf
.protocol
== ATA_PROT_ATAPI
) {
5477 /* ATAPI PIO protocol */
5478 if ((status
& ATA_DRQ
) == 0) {
5479 /* No more data to transfer or device error.
5480 * Device error will be tagged in HSM_ST_LAST.
5482 ap
->hsm_task_state
= HSM_ST_LAST
;
5486 /* Device should not ask for data transfer (DRQ=1)
5487 * when it finds something wrong.
5488 * We ignore DRQ here and stop the HSM by
5489 * changing hsm_task_state to HSM_ST_ERR and
5490 * let the EH abort the command or reset the device.
5492 if (unlikely(status
& (ATA_ERR
| ATA_DF
))) {
5493 ata_port_printk(ap
, KERN_WARNING
, "DRQ=1 with "
5494 "device error, dev_stat 0x%X\n",
5496 qc
->err_mask
|= AC_ERR_HSM
;
5497 ap
->hsm_task_state
= HSM_ST_ERR
;
5501 atapi_pio_bytes(qc
);
5503 if (unlikely(ap
->hsm_task_state
== HSM_ST_ERR
))
5504 /* bad ireason reported by device */
5508 /* ATA PIO protocol */
5509 if (unlikely((status
& ATA_DRQ
) == 0)) {
5510 /* handle BSY=0, DRQ=0 as error */
5511 if (likely(status
& (ATA_ERR
| ATA_DF
)))
5512 /* device stops HSM for abort/error */
5513 qc
->err_mask
|= AC_ERR_DEV
;
5515 /* HSM violation. Let EH handle this.
5516 * Phantom devices also trigger this
5517 * condition. Mark hint.
5519 qc
->err_mask
|= AC_ERR_HSM
|
5522 ap
->hsm_task_state
= HSM_ST_ERR
;
5526 /* For PIO reads, some devices may ask for
5527 * data transfer (DRQ=1) alone with ERR=1.
5528 * We respect DRQ here and transfer one
5529 * block of junk data before changing the
5530 * hsm_task_state to HSM_ST_ERR.
5532 * For PIO writes, ERR=1 DRQ=1 doesn't make
5533 * sense since the data block has been
5534 * transferred to the device.
5536 if (unlikely(status
& (ATA_ERR
| ATA_DF
))) {
5537 /* data might be corrputed */
5538 qc
->err_mask
|= AC_ERR_DEV
;
5540 if (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
)) {
5541 ata_pio_sectors(qc
);
5542 status
= ata_wait_idle(ap
);
5545 if (status
& (ATA_BUSY
| ATA_DRQ
))
5546 qc
->err_mask
|= AC_ERR_HSM
;
5548 /* ata_pio_sectors() might change the
5549 * state to HSM_ST_LAST. so, the state
5550 * is changed after ata_pio_sectors().
5552 ap
->hsm_task_state
= HSM_ST_ERR
;
5556 ata_pio_sectors(qc
);
5558 if (ap
->hsm_task_state
== HSM_ST_LAST
&&
5559 (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
))) {
5561 status
= ata_wait_idle(ap
);
5570 if (unlikely(!ata_ok(status
))) {
5571 qc
->err_mask
|= __ac_err_mask(status
);
5572 ap
->hsm_task_state
= HSM_ST_ERR
;
5576 /* no more data to transfer */
5577 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5578 ap
->print_id
, qc
->dev
->devno
, status
);
5580 WARN_ON(qc
->err_mask
);
5582 ap
->hsm_task_state
= HSM_ST_IDLE
;
5584 /* complete taskfile transaction */
5585 ata_hsm_qc_complete(qc
, in_wq
);
5591 /* make sure qc->err_mask is available to
5592 * know what's wrong and recover
5594 WARN_ON(qc
->err_mask
== 0);
5596 ap
->hsm_task_state
= HSM_ST_IDLE
;
5598 /* complete taskfile transaction */
5599 ata_hsm_qc_complete(qc
, in_wq
);
5611 static void ata_pio_task(struct work_struct
*work
)
5613 struct ata_port
*ap
=
5614 container_of(work
, struct ata_port
, port_task
.work
);
5615 struct ata_queued_cmd
*qc
= ap
->port_task_data
;
5620 WARN_ON(ap
->hsm_task_state
== HSM_ST_IDLE
);
5623 * This is purely heuristic. This is a fast path.
5624 * Sometimes when we enter, BSY will be cleared in
5625 * a chk-status or two. If not, the drive is probably seeking
5626 * or something. Snooze for a couple msecs, then
5627 * chk-status again. If still busy, queue delayed work.
5629 status
= ata_busy_wait(ap
, ATA_BUSY
, 5);
5630 if (status
& ATA_BUSY
) {
5632 status
= ata_busy_wait(ap
, ATA_BUSY
, 10);
5633 if (status
& ATA_BUSY
) {
5634 ata_port_queue_task(ap
, ata_pio_task
, qc
, ATA_SHORT_PAUSE
);
5640 poll_next
= ata_hsm_move(ap
, qc
, status
, 1);
5642 /* another command or interrupt handler
5643 * may be running at this point.
5650 * ata_qc_new - Request an available ATA command, for queueing
5651 * @ap: Port associated with device @dev
5652 * @dev: Device from whom we request an available command structure
5658 static struct ata_queued_cmd
*ata_qc_new(struct ata_port
*ap
)
5660 struct ata_queued_cmd
*qc
= NULL
;
5663 /* no command while frozen */
5664 if (unlikely(ap
->pflags
& ATA_PFLAG_FROZEN
))
5667 /* the last tag is reserved for internal command. */
5668 for (i
= 0; i
< ATA_MAX_QUEUE
- 1; i
++)
5669 if (!test_and_set_bit(i
, &ap
->qc_allocated
)) {
5670 qc
= __ata_qc_from_tag(ap
, i
);
5681 * ata_qc_new_init - Request an available ATA command, and initialize it
5682 * @dev: Device from whom we request an available command structure
5688 struct ata_queued_cmd
*ata_qc_new_init(struct ata_device
*dev
)
5690 struct ata_port
*ap
= dev
->link
->ap
;
5691 struct ata_queued_cmd
*qc
;
5693 qc
= ata_qc_new(ap
);
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
5730 void __ata_qc_complete(struct ata_queued_cmd
*qc
)
5732 struct ata_port
*ap
= qc
->ap
;
5733 struct ata_link
*link
= qc
->dev
->link
;
5735 WARN_ON(qc
== NULL
); /* ata_qc_from_tag _might_ return NULL */
5736 WARN_ON(!(qc
->flags
& ATA_QCFLAG_ACTIVE
));
5738 if (likely(qc
->flags
& ATA_QCFLAG_DMAMAP
))
5741 /* command should be marked inactive atomically with qc completion */
5742 if (qc
->tf
.protocol
== ATA_PROT_NCQ
) {
5743 link
->sactive
&= ~(1 << qc
->tag
);
5745 ap
->nr_active_links
--;
5747 link
->active_tag
= ATA_TAG_POISON
;
5748 ap
->nr_active_links
--;
5751 /* clear exclusive status */
5752 if (unlikely(qc
->flags
& ATA_QCFLAG_CLEAR_EXCL
&&
5753 ap
->excl_link
== link
))
5754 ap
->excl_link
= NULL
;
5756 /* atapi: mark qc as inactive to prevent the interrupt handler
5757 * from completing the command twice later, before the error handler
5758 * is called. (when rc != 0 and atapi request sense is needed)
5760 qc
->flags
&= ~ATA_QCFLAG_ACTIVE
;
5761 ap
->qc_active
&= ~(1 << qc
->tag
);
5763 /* call completion callback */
5764 qc
->complete_fn(qc
);
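/*
 * Illustrative sketch (not part of this file): active-command bookkeeping is
 * a per-link bitmap for NCQ (link->sactive) plus a single active_tag for
 * non-NCQ, mirrored in the port-wide ap->qc_active mask.  The hypothetical
 * helper below restates the bit manipulation __ata_qc_complete() performs
 * when a tagged command retires.
 */
#if 0
static void example_retire_tag(u32 *sactive, u32 *qc_active, unsigned int tag)
{
	*sactive   &= ~(1 << tag);	/* link no longer owns this tag */
	*qc_active &= ~(1 << tag);	/* clear port-wide active bit */
}
#endif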
5767 static void fill_result_tf(struct ata_queued_cmd
*qc
)
5769 struct ata_port
*ap
= qc
->ap
;
5771 qc
->result_tf
.flags
= qc
->tf
.flags
;
5772 ap
->ops
->tf_read(ap
, &qc
->result_tf
);
5776 * ata_qc_complete - Complete an active ATA command
5777 * @qc: Command to complete
5778 * @err_mask: ATA Status register contents
5780 * Indicate to the mid and upper layers that an ATA
5781 * command has completed, with either an ok or not-ok status.
5784 * spin_lock_irqsave(host lock)
5786 void ata_qc_complete(struct ata_queued_cmd
*qc
)
5788 struct ata_port
*ap
= qc
->ap
;
5790 /* XXX: New EH and old EH use different mechanisms to
5791 * synchronize EH with regular execution path.
5793 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5794 * Normal execution path is responsible for not accessing a
5795 * failed qc. libata core enforces the rule by returning NULL
5796 * from ata_qc_from_tag() for failed qcs.
5798 * Old EH depends on ata_qc_complete() nullifying completion
5799 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5800 * not synchronize with interrupt handler. Only PIO task is
5803 if (ap
->ops
->error_handler
) {
5804 struct ata_device
*dev
= qc
->dev
;
5805 struct ata_eh_info
*ehi
= &dev
->link
->eh_info
;
5807 WARN_ON(ap
->pflags
& ATA_PFLAG_FROZEN
);
5809 if (unlikely(qc
->err_mask
))
5810 qc
->flags
|= ATA_QCFLAG_FAILED
;
5812 if (unlikely(qc
->flags
& ATA_QCFLAG_FAILED
)) {
5813 if (!ata_tag_internal(qc
->tag
)) {
5814 /* always fill result TF for failed qc */
5816 ata_qc_schedule_eh(qc
);
5821 /* read result TF if requested */
5822 if (qc
->flags
& ATA_QCFLAG_RESULT_TF
)
5825 /* Some commands need post-processing after successful
5828 switch (qc
->tf
.command
) {
5829 case ATA_CMD_SET_FEATURES
:
5830 if (qc
->tf
.feature
!= SETFEATURES_WC_ON
&&
5831 qc
->tf
.feature
!= SETFEATURES_WC_OFF
)
5834 case ATA_CMD_INIT_DEV_PARAMS
: /* CHS translation changed */
5835 case ATA_CMD_SET_MULTI
: /* multi_count changed */
5836 /* revalidate device */
5837 ehi
->dev_action
[dev
->devno
] |= ATA_EH_REVALIDATE
;
5838 ata_port_schedule_eh(ap
);
5842 dev
->flags
|= ATA_DFLAG_SLEEPING
;
5846 __ata_qc_complete(qc
);
5848 if (qc
->flags
& ATA_QCFLAG_EH_SCHEDULED
)
5851 /* read result TF if failed or requested */
5852 if (qc
->err_mask
|| qc
->flags
& ATA_QCFLAG_RESULT_TF
)
5855 __ata_qc_complete(qc
);
5860 * ata_qc_complete_multiple - Complete multiple qcs successfully
5861 * @ap: port in question
5862 * @qc_active: new qc_active mask
5863 * @finish_qc: LLDD callback invoked before completing a qc
5865 * Complete in-flight commands. This functions is meant to be
5866 * called from low-level driver's interrupt routine to complete
5867 * requests normally. ap->qc_active and @qc_active is compared
5868 * and commands are completed accordingly.
5871 * spin_lock_irqsave(host lock)
5874 * Number of completed commands on success, -errno otherwise.
5876 int ata_qc_complete_multiple(struct ata_port
*ap
, u32 qc_active
,
5877 void (*finish_qc
)(struct ata_queued_cmd
*))
5883 done_mask
= ap
->qc_active
^ qc_active
;
5885 if (unlikely(done_mask
& qc_active
)) {
5886 ata_port_printk(ap
, KERN_ERR
, "illegal qc_active transition "
5887 "(%08x->%08x)\n", ap
->qc_active
, qc_active
);
5891 for (i
= 0; i
< ATA_MAX_QUEUE
; i
++) {
5892 struct ata_queued_cmd
*qc
;
5894 if (!(done_mask
& (1 << i
)))
5897 if ((qc
= ata_qc_from_tag(ap
, i
))) {
5900 ata_qc_complete(qc
);
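/*
 * Illustrative sketch (not part of this file): an LLD interrupt handler
 * derives the set of finished tags by XOR-ing its hardware view of active
 * commands against libata's ap->qc_active, then completes each tag whose bit
 * dropped out, which is what ata_qc_complete_multiple() does above.
 * Hypothetical values: old 0x13, new 0x01 -> done_mask 0x12, i.e. tags 1
 * and 4 completed.
 */
#if 0
static u32 example_done_mask(u32 old_qc_active, u32 new_qc_active)
{
	return old_qc_active ^ new_qc_active;
}
#endif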
5908 static inline int ata_should_dma_map(struct ata_queued_cmd
*qc
)
5910 struct ata_port
*ap
= qc
->ap
;
5912 switch (qc
->tf
.protocol
) {
5915 case ATA_PROT_ATAPI_DMA
:
5918 case ATA_PROT_ATAPI
:
5920 if (ap
->flags
& ATA_FLAG_PIO_DMA
)
5933 * ata_qc_issue - issue taskfile to device
5934 * @qc: command to issue to device
5936 * Prepare an ATA command to submission to device.
5937 * This includes mapping the data into a DMA-able
5938 * area, filling in the S/G table, and finally
5939 * writing the taskfile to hardware, starting the command.
5942 * spin_lock_irqsave(host lock)
5944 void ata_qc_issue(struct ata_queued_cmd
*qc
)
5946 struct ata_port
*ap
= qc
->ap
;
5947 struct ata_link
*link
= qc
->dev
->link
;
5949 /* Make sure only one non-NCQ command is outstanding. The
5950 * check is skipped for old EH because it reuses active qc to
5951 * request ATAPI sense.
5953 WARN_ON(ap
->ops
->error_handler
&& ata_tag_valid(link
->active_tag
));
5955 if (qc
->tf
.protocol
== ATA_PROT_NCQ
) {
5956 WARN_ON(link
->sactive
& (1 << qc
->tag
));
5959 ap
->nr_active_links
++;
5960 link
->sactive
|= 1 << qc
->tag
;
5962 WARN_ON(link
->sactive
);
5964 ap
->nr_active_links
++;
5965 link
->active_tag
= qc
->tag
;
5968 qc
->flags
|= ATA_QCFLAG_ACTIVE
;
5969 ap
->qc_active
|= 1 << qc
->tag
;
5971 if (ata_should_dma_map(qc
)) {
5972 if (qc
->flags
& ATA_QCFLAG_SG
) {
5973 if (ata_sg_setup(qc
))
5975 } else if (qc
->flags
& ATA_QCFLAG_SINGLE
) {
5976 if (ata_sg_setup_one(qc
))
5980 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
5983 /* if device is sleeping, schedule softreset and abort the link */
5984 if (unlikely(qc
->dev
->flags
& ATA_DFLAG_SLEEPING
)) {
5985 link
->eh_info
.action
|= ATA_EH_SOFTRESET
;
5986 ata_ehi_push_desc(&link
->eh_info
, "waking up from sleep");
5987 ata_link_abort(link
);
5991 ap
->ops
->qc_prep(qc
);
5993 qc
->err_mask
|= ap
->ops
->qc_issue(qc
);
5994 if (unlikely(qc
->err_mask
))
5999 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
6000 qc
->err_mask
|= AC_ERR_SYSTEM
;
6002 ata_qc_complete(qc
);
6006 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6007 * @qc: command to issue to device
6009 * Using various libata functions and hooks, this function
6010 * starts an ATA command. ATA commands are grouped into
6011 * classes called "protocols", and issuing each type of protocol
6012 * is slightly different.
6014 * May be used as the qc_issue() entry in ata_port_operations.
6017 * spin_lock_irqsave(host lock)
6020 * Zero on success, AC_ERR_* mask on failure
6023 unsigned int ata_qc_issue_prot(struct ata_queued_cmd
*qc
)
6025 struct ata_port
*ap
= qc
->ap
;
6027 /* Use polling pio if the LLD doesn't handle
6028 * interrupt driven pio and atapi CDB interrupt.
6030 if (ap
->flags
& ATA_FLAG_PIO_POLLING
) {
6031 switch (qc
->tf
.protocol
) {
6033 case ATA_PROT_NODATA
:
6034 case ATA_PROT_ATAPI
:
6035 case ATA_PROT_ATAPI_NODATA
:
6036 qc
->tf
.flags
|= ATA_TFLAG_POLLING
;
6038 case ATA_PROT_ATAPI_DMA
:
6039 if (qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
)
6040 /* see ata_dma_blacklisted() */
6048 /* select the device */
6049 ata_dev_select(ap
, qc
->dev
->devno
, 1, 0);
6051 /* start the command */
6052 switch (qc
->tf
.protocol
) {
6053 case ATA_PROT_NODATA
:
6054 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
6055 ata_qc_set_polling(qc
);
6057 ata_tf_to_host(ap
, &qc
->tf
);
6058 ap
->hsm_task_state
= HSM_ST_LAST
;
6060 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
6061 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
6066 WARN_ON(qc
->tf
.flags
& ATA_TFLAG_POLLING
);
6068 ap
->ops
->tf_load(ap
, &qc
->tf
); /* load tf registers */
6069 ap
->ops
->bmdma_setup(qc
); /* set up bmdma */
6070 ap
->ops
->bmdma_start(qc
); /* initiate bmdma */
6071 ap
->hsm_task_state
= HSM_ST_LAST
;
6075 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
6076 ata_qc_set_polling(qc
);
6078 ata_tf_to_host(ap
, &qc
->tf
);
6080 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
) {
6081 /* PIO data out protocol */
6082 ap
->hsm_task_state
= HSM_ST_FIRST
;
6083 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
6085 /* always send first data block using
6086 * the ata_pio_task() codepath.
6089 /* PIO data in protocol */
6090 ap
->hsm_task_state
= HSM_ST
;
6092 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
6093 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
6095 /* if polling, ata_pio_task() handles the rest.
6096 * otherwise, interrupt handler takes over from here.
6102 case ATA_PROT_ATAPI
:
6103 case ATA_PROT_ATAPI_NODATA
:
6104 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
6105 ata_qc_set_polling(qc
);
6107 ata_tf_to_host(ap
, &qc
->tf
);
6109 ap
->hsm_task_state
= HSM_ST_FIRST
;
6111 /* send cdb by polling if no cdb interrupt */
6112 if ((!(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
)) ||
6113 (qc
->tf
.flags
& ATA_TFLAG_POLLING
))
6114 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
6117 case ATA_PROT_ATAPI_DMA
:
6118 WARN_ON(qc
->tf
.flags
& ATA_TFLAG_POLLING
);
6120 ap
->ops
->tf_load(ap
, &qc
->tf
); /* load tf registers */
6121 ap
->ops
->bmdma_setup(qc
); /* set up bmdma */
6122 ap
->hsm_task_state
= HSM_ST_FIRST
;
6124 /* send cdb by polling if no cdb interrupt */
6125 if (!(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
))
6126 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
6131 return AC_ERR_SYSTEM
;
6138 * ata_host_intr - Handle host interrupt for given (port, task)
6139 * @ap: Port on which interrupt arrived (possibly...)
6140 * @qc: Taskfile currently active in engine
6142 * Handle host interrupt for given queued command. Currently,
6143 * only DMA interrupts are handled. All other commands are
6144 * handled via polling with interrupts disabled (nIEN bit).
6147 * spin_lock_irqsave(host lock)
6150 * One if interrupt was handled, zero if not (shared irq).
6153 inline unsigned int ata_host_intr(struct ata_port
*ap
,
6154 struct ata_queued_cmd
*qc
)
6156 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
6157 u8 status
, host_stat
= 0;
6159 VPRINTK("ata%u: protocol %d task_state %d\n",
6160 ap
->print_id
, qc
->tf
.protocol
, ap
->hsm_task_state
);
6162 /* Check whether we are expecting interrupt in this state */
6163 switch (ap
->hsm_task_state
) {
6165 /* Some pre-ATAPI-4 devices assert INTRQ
6166 * at this state when ready to receive CDB.
6169 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
6170 * The flag was turned on only for atapi devices.
6171 * No need to check is_atapi_taskfile(&qc->tf) again.
6173 if (!(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
))
6177 if (qc
->tf
.protocol
== ATA_PROT_DMA
||
6178 qc
->tf
.protocol
== ATA_PROT_ATAPI_DMA
) {
6179 /* check status of DMA engine */
6180 host_stat
= ap
->ops
->bmdma_status(ap
);
6181 VPRINTK("ata%u: host_stat 0x%X\n",
6182 ap
->print_id
, host_stat
);
6184 /* if it's not our irq... */
6185 if (!(host_stat
& ATA_DMA_INTR
))
6188 /* before we do anything else, clear DMA-Start bit */
6189 ap
->ops
->bmdma_stop(qc
);
6191 if (unlikely(host_stat
& ATA_DMA_ERR
)) {
6192 /* error when transfering data to/from memory */
6193 qc
->err_mask
|= AC_ERR_HOST_BUS
;
6194 ap
->hsm_task_state
= HSM_ST_ERR
;
6204 /* check altstatus */
6205 status
= ata_altstatus(ap
);
6206 if (status
& ATA_BUSY
)
6209 /* check main status, clearing INTRQ */
6210 status
= ata_chk_status(ap
);
6211 if (unlikely(status
& ATA_BUSY
))
6214 /* ack bmdma irq events */
6215 ap
->ops
->irq_clear(ap
);
6217 ata_hsm_move(ap
, qc
, status
, 0);
6219 if (unlikely(qc
->err_mask
) && (qc
->tf
.protocol
== ATA_PROT_DMA
||
6220 qc
->tf
.protocol
== ATA_PROT_ATAPI_DMA
))
6221 ata_ehi_push_desc(ehi
, "BMDMA stat 0x%x", host_stat
);
6223 return 1; /* irq handled */
6226 ap
->stats
.idle_irq
++;
6229 if ((ap
->stats
.idle_irq
% 1000) == 0) {
6231 ap
->ops
->irq_clear(ap
);
6232 ata_port_printk(ap
, KERN_WARNING
, "irq trap\n");
6236 return 0; /* irq not handled */
6240 * ata_interrupt - Default ATA host interrupt handler
6241 * @irq: irq line (unused)
6242 * @dev_instance: pointer to our ata_host information structure
6244 * Default interrupt handler for PCI IDE devices. Calls
6245 * ata_host_intr() for each port that is not disabled.
6248 * Obtains host lock during operation.
6251 * IRQ_NONE or IRQ_HANDLED.
6254 irqreturn_t
ata_interrupt(int irq
, void *dev_instance
)
6256 struct ata_host
*host
= dev_instance
;
6258 unsigned int handled
= 0;
6259 unsigned long flags
;
6261 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6262 spin_lock_irqsave(&host
->lock
, flags
);
6264 for (i
= 0; i
< host
->n_ports
; i
++) {
6265 struct ata_port
*ap
;
6267 ap
= host
->ports
[i
];
6269 !(ap
->flags
& ATA_FLAG_DISABLED
)) {
6270 struct ata_queued_cmd
*qc
;
6272 qc
= ata_qc_from_tag(ap
, ap
->link
.active_tag
);
6273 if (qc
&& (!(qc
->tf
.flags
& ATA_TFLAG_POLLING
)) &&
6274 (qc
->flags
& ATA_QCFLAG_ACTIVE
))
6275 handled
|= ata_host_intr(ap
, qc
);
6279 spin_unlock_irqrestore(&host
->lock
, flags
);
6281 return IRQ_RETVAL(handled
);
/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@link: ATA link to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @link.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_link *link)
{
	struct ata_port *ap = link->ap;

	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}
6304 * sata_scr_read - read SCR register of the specified port
6305 * @link: ATA link to read SCR for
6307 * @val: Place to store read value
6309 * Read SCR register @reg of @link into *@val. This function is
6310 * guaranteed to succeed if @link is ap->link, the cable type of
6311 * the port is SATA and the port implements ->scr_read.
6314 * None if @link is ap->link. Kernel thread context otherwise.
6317 * 0 on success, negative errno on failure.
6319 int sata_scr_read(struct ata_link
*link
, int reg
, u32
*val
)
6321 if (ata_is_host_link(link
)) {
6322 struct ata_port
*ap
= link
->ap
;
6324 if (sata_scr_valid(link
))
6325 return ap
->ops
->scr_read(ap
, reg
, val
);
6329 return sata_pmp_scr_read(link
, reg
, val
);
6333 * sata_scr_write - write SCR register of the specified port
6334 * @link: ATA link to write SCR for
6335 * @reg: SCR to write
6336 * @val: value to write
6338 * Write @val to SCR register @reg of @link. This function is
6339 * guaranteed to succeed if @link is ap->link, the cable type of
6340 * the port is SATA and the port implements ->scr_read.
6343 * None if @link is ap->link. Kernel thread context otherwise.
6346 * 0 on success, negative errno on failure.
6348 int sata_scr_write(struct ata_link
*link
, int reg
, u32 val
)
6350 if (ata_is_host_link(link
)) {
6351 struct ata_port
*ap
= link
->ap
;
6353 if (sata_scr_valid(link
))
6354 return ap
->ops
->scr_write(ap
, reg
, val
);
6358 return sata_pmp_scr_write(link
, reg
, val
);
6362 * sata_scr_write_flush - write SCR register of the specified port and flush
6363 * @link: ATA link to write SCR for
6364 * @reg: SCR to write
6365 * @val: value to write
6367 * This function is identical to sata_scr_write() except that this
6368 * function performs flush after writing to the register.
6371 * None if @link is ap->link. Kernel thread context otherwise.
6374 * 0 on success, negative errno on failure.
6376 int sata_scr_write_flush(struct ata_link
*link
, int reg
, u32 val
)
6378 if (ata_is_host_link(link
)) {
6379 struct ata_port
*ap
= link
->ap
;
6382 if (sata_scr_valid(link
)) {
6383 rc
= ap
->ops
->scr_write(ap
, reg
, val
);
6385 rc
= ap
->ops
->scr_read(ap
, reg
, &val
);
6391 return sata_pmp_scr_write(link
, reg
, val
);
/**
 *	ata_link_online - test whether the given link is online
 *	@link: ATA link to test
 *
 *	Test whether @link is online.  Note that this function returns
 *	0 if online status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_link_online(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}

/**
 *	ata_link_offline - test whether the given link is offline
 *	@link: ATA link to test
 *
 *	Test whether @link is offline.  Note that this function
 *	returns 0 if offline status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_link_offline(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}
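/*
 * Illustrative sketch (not part of this file): the low nibble of SStatus is
 * the DET field; 0x3 means a device is present and PHY communication is
 * established, which is what the online/offline tests above key off.  The
 * helper name is hypothetical.
 */
#if 0
static int example_det_established(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;	/* device present, PHY up */
}
#endif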
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
6469 static int ata_host_request_pm(struct ata_host
*host
, pm_message_t mesg
,
6470 unsigned int action
, unsigned int ehi_flags
,
6473 unsigned long flags
;
6476 for (i
= 0; i
< host
->n_ports
; i
++) {
6477 struct ata_port
*ap
= host
->ports
[i
];
6478 struct ata_link
*link
;
6480 /* Previous resume operation might still be in
6481 * progress. Wait for PM_PENDING to clear.
6483 if (ap
->pflags
& ATA_PFLAG_PM_PENDING
) {
6484 ata_port_wait_eh(ap
);
6485 WARN_ON(ap
->pflags
& ATA_PFLAG_PM_PENDING
);
6488 /* request PM ops to EH */
6489 spin_lock_irqsave(ap
->lock
, flags
);
6494 ap
->pm_result
= &rc
;
6497 ap
->pflags
|= ATA_PFLAG_PM_PENDING
;
6498 __ata_port_for_each_link(link
, ap
) {
6499 link
->eh_info
.action
|= action
;
6500 link
->eh_info
.flags
|= ehi_flags
;
6503 ata_port_schedule_eh(ap
);
6505 spin_unlock_irqrestore(ap
->lock
, flags
);
6507 /* wait and check result */
6509 ata_port_wait_eh(ap
);
6510 WARN_ON(ap
->pflags
& ATA_PFLAG_PM_PENDING
);
6520 * ata_host_suspend - suspend host
6521 * @host: host to suspend
6524 * Suspend @host. Actual operation is performed by EH. This
6525 * function requests EH to perform PM operations and waits for EH
6529 * Kernel thread context (may sleep).
6532 * 0 on success, -errno on failure.
6534 int ata_host_suspend(struct ata_host
*host
, pm_message_t mesg
)
6539 * disable link pm on all ports before requesting
6542 ata_lpm_enable(host
);
6544 rc
= ata_host_request_pm(host
, mesg
, 0, ATA_EHI_QUIET
, 1);
6546 host
->dev
->power
.power_state
= mesg
;
6551 * ata_host_resume - resume host
6552 * @host: host to resume
6554 * Resume @host. Actual operation is performed by EH. This
6555 * function requests EH to perform PM operations and returns.
6556 * Note that all resume operations are performed parallely.
6559 * Kernel thread context (may sleep).
6561 void ata_host_resume(struct ata_host
*host
)
6563 ata_host_request_pm(host
, PMSG_ON
, ATA_EH_SOFTRESET
,
6564 ATA_EHI_NO_AUTOPSY
| ATA_EHI_QUIET
, 0);
6565 host
->dev
->power
.power_state
= PMSG_ON
;
6567 /* reenable link pm */
6568 ata_lpm_disable(host
);
6573 * ata_port_start - Set port up for dma.
6574 * @ap: Port to initialize
6576 * Called just after data structures for each port are
6577 * initialized. Allocates space for PRD table.
6579 * May be used as the port_start() entry in ata_port_operations.
6582 * Inherited from caller.
6584 int ata_port_start(struct ata_port
*ap
)
6586 struct device
*dev
= ap
->dev
;
6589 ap
->prd
= dmam_alloc_coherent(dev
, ATA_PRD_TBL_SZ
, &ap
->prd_dma
,
6594 rc
= ata_pad_alloc(ap
, dev
);
6598 DPRINTK("prd alloc, virt %p, dma %llx\n", ap
->prd
,
6599 (unsigned long long)ap
->prd_dma
);
6604 * ata_dev_init - Initialize an ata_device structure
6605 * @dev: Device structure to initialize
6607 * Initialize @dev in preparation for probing.
6610 * Inherited from caller.
6612 void ata_dev_init(struct ata_device
*dev
)
6614 struct ata_link
*link
= dev
->link
;
6615 struct ata_port
*ap
= link
->ap
;
6616 unsigned long flags
;
6618 /* SATA spd limit is bound to the first device */
6619 link
->sata_spd_limit
= link
->hw_sata_spd_limit
;
6622 /* High bits of dev->flags are used to record warm plug
6623 * requests which occur asynchronously. Synchronize using
6626 spin_lock_irqsave(ap
->lock
, flags
);
6627 dev
->flags
&= ~ATA_DFLAG_INIT_MASK
;
6629 spin_unlock_irqrestore(ap
->lock
, flags
);
6631 memset((void *)dev
+ ATA_DEVICE_CLEAR_OFFSET
, 0,
6632 sizeof(*dev
) - ATA_DEVICE_CLEAR_OFFSET
);
6633 dev
->pio_mask
= UINT_MAX
;
6634 dev
->mwdma_mask
= UINT_MAX
;
6635 dev
->udma_mask
= UINT_MAX
;
6639 * ata_link_init - Initialize an ata_link structure
6640 * @ap: ATA port link is attached to
6641 * @link: Link structure to initialize
6642 * @pmp: Port multiplier port number
6647 * Kernel thread context (may sleep)
6649 void ata_link_init(struct ata_port
*ap
, struct ata_link
*link
, int pmp
)
6653 /* clear everything except for devices */
6654 memset(link
, 0, offsetof(struct ata_link
, device
[0]));
6658 link
->active_tag
= ATA_TAG_POISON
;
6659 link
->hw_sata_spd_limit
= UINT_MAX
;
6661 /* can't use iterator, ap isn't initialized yet */
6662 for (i
= 0; i
< ATA_MAX_DEVICES
; i
++) {
6663 struct ata_device
*dev
= &link
->device
[i
];
6666 dev
->devno
= dev
- link
->device
;
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u32 scontrol, spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	spd = (scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
6703 * ata_port_alloc - allocate and initialize basic ATA port resources
6704 * @host: ATA host this allocated port belongs to
6706 * Allocate and initialize basic ATA port resources.
6709 * Allocate ATA port on success, NULL on failure.
6712 * Inherited from calling layer (may sleep).
6714 struct ata_port
*ata_port_alloc(struct ata_host
*host
)
6716 struct ata_port
*ap
;
6720 ap
= kzalloc(sizeof(*ap
), GFP_KERNEL
);
6724 ap
->pflags
|= ATA_PFLAG_INITIALIZING
;
6725 ap
->lock
= &host
->lock
;
6726 ap
->flags
= ATA_FLAG_DISABLED
;
6728 ap
->ctl
= ATA_DEVCTL_OBS
;
6730 ap
->dev
= host
->dev
;
6731 ap
->last_ctl
= 0xFF;
6733 #if defined(ATA_VERBOSE_DEBUG)
6734 /* turn on all debugging levels */
6735 ap
->msg_enable
= 0x00FF;
6736 #elif defined(ATA_DEBUG)
6737 ap
->msg_enable
= ATA_MSG_DRV
| ATA_MSG_INFO
| ATA_MSG_CTL
| ATA_MSG_WARN
| ATA_MSG_ERR
;
6739 ap
->msg_enable
= ATA_MSG_DRV
| ATA_MSG_ERR
| ATA_MSG_WARN
;
6742 INIT_DELAYED_WORK(&ap
->port_task
, NULL
);
6743 INIT_DELAYED_WORK(&ap
->hotplug_task
, ata_scsi_hotplug
);
6744 INIT_WORK(&ap
->scsi_rescan_task
, ata_scsi_dev_rescan
);
6745 INIT_LIST_HEAD(&ap
->eh_done_q
);
6746 init_waitqueue_head(&ap
->eh_wait_q
);
6747 init_timer_deferrable(&ap
->fastdrain_timer
);
6748 ap
->fastdrain_timer
.function
= ata_eh_fastdrain_timerfn
;
6749 ap
->fastdrain_timer
.data
= (unsigned long)ap
;
6751 ap
->cbl
= ATA_CBL_NONE
;
6753 ata_link_init(ap
, &ap
->link
, 0);
6756 ap
->stats
.unhandled_irq
= 1;
6757 ap
->stats
.idle_irq
= 1;
6762 static void ata_host_release(struct device
*gendev
, void *res
)
6764 struct ata_host
*host
= dev_get_drvdata(gendev
);
6767 for (i
= 0; i
< host
->n_ports
; i
++) {
6768 struct ata_port
*ap
= host
->ports
[i
];
6774 scsi_host_put(ap
->scsi_host
);
6776 kfree(ap
->pmp_link
);
6778 host
->ports
[i
] = NULL
;
6781 dev_set_drvdata(gendev
, NULL
);
6785 * ata_host_alloc - allocate and init basic ATA host resources
6786 * @dev: generic device this host is associated with
6787 * @max_ports: maximum number of ATA ports associated with this host
6789 * Allocate and initialize basic ATA host resources. LLD calls
6790 * this function to allocate a host, initializes it fully and
6791 * attaches it using ata_host_register().
6793 * @max_ports ports are allocated and host->n_ports is
6794 * initialized to @max_ports. The caller is allowed to decrease
6795 * host->n_ports before calling ata_host_register(). The unused
6796 * ports will be automatically freed on registration.
6799 * Allocate ATA host on success, NULL on failure.
6802 * Inherited from calling layer (may sleep).
6804 struct ata_host
*ata_host_alloc(struct device
*dev
, int max_ports
)
6806 struct ata_host
*host
;
6812 if (!devres_open_group(dev
, NULL
, GFP_KERNEL
))
6815 /* alloc a container for our list of ATA ports (buses) */
6816 sz
= sizeof(struct ata_host
) + (max_ports
+ 1) * sizeof(void *);
6817 /* alloc a container for our list of ATA ports (buses) */
6818 host
= devres_alloc(ata_host_release
, sz
, GFP_KERNEL
);
6822 devres_add(dev
, host
);
6823 dev_set_drvdata(dev
, host
);
6825 spin_lock_init(&host
->lock
);
6827 host
->n_ports
= max_ports
;
6829 /* allocate ports bound to this host */
6830 for (i
= 0; i
< max_ports
; i
++) {
6831 struct ata_port
*ap
;
6833 ap
= ata_port_alloc(host
);
6838 host
->ports
[i
] = ap
;
6841 devres_remove_group(dev
, NULL
);
6845 devres_release_group(dev
, NULL
);
6850 * ata_host_alloc_pinfo - alloc host and init with port_info array
6851 * @dev: generic device this host is associated with
6852 * @ppi: array of ATA port_info to initialize host with
6853 * @n_ports: number of ATA ports attached to this host
6855 * Allocate ATA host and initialize with info from @ppi. If NULL
6856 * terminated, @ppi may contain fewer entries than @n_ports. The
6857 * last entry will be used for the remaining ports.
6860 * Allocate ATA host on success, NULL on failure.
6863 * Inherited from calling layer (may sleep).
6865 struct ata_host
*ata_host_alloc_pinfo(struct device
*dev
,
6866 const struct ata_port_info
* const * ppi
,
6869 const struct ata_port_info
*pi
;
6870 struct ata_host
*host
;
6873 host
= ata_host_alloc(dev
, n_ports
);
6877 for (i
= 0, j
= 0, pi
= NULL
; i
< host
->n_ports
; i
++) {
6878 struct ata_port
*ap
= host
->ports
[i
];
6883 ap
->pio_mask
= pi
->pio_mask
;
6884 ap
->mwdma_mask
= pi
->mwdma_mask
;
6885 ap
->udma_mask
= pi
->udma_mask
;
6886 ap
->flags
|= pi
->flags
;
6887 ap
->link
.flags
|= pi
->link_flags
;
6888 ap
->ops
= pi
->port_ops
;
6890 if (!host
->ops
&& (pi
->port_ops
!= &ata_dummy_port_ops
))
6891 host
->ops
= pi
->port_ops
;
6892 if (!host
->private_data
&& pi
->private_data
)
6893 host
->private_data
= pi
->private_data
;
6899 static void ata_host_stop(struct device
*gendev
, void *res
)
6901 struct ata_host
*host
= dev_get_drvdata(gendev
);
6904 WARN_ON(!(host
->flags
& ATA_HOST_STARTED
));
6906 for (i
= 0; i
< host
->n_ports
; i
++) {
6907 struct ata_port
*ap
= host
->ports
[i
];
6909 if (ap
->ops
->port_stop
)
6910 ap
->ops
->port_stop(ap
);
6913 if (host
->ops
->host_stop
)
6914 host
->ops
->host_stop(host
);
6918 * ata_host_start - start and freeze ports of an ATA host
6919 * @host: ATA host to start ports for
6921 * Start and then freeze ports of @host. Started status is
6922 * recorded in host->flags, so this function can be called
6923 * multiple times. Ports are guaranteed to get started only
6924 * once. If host->ops isn't initialized yet, its set to the
6925 * first non-dummy port ops.
6928 * Inherited from calling layer (may sleep).
6931 * 0 if all ports are started successfully, -errno otherwise.
6933 int ata_host_start(struct ata_host
*host
)
6936 void *start_dr
= NULL
;
6939 if (host
->flags
& ATA_HOST_STARTED
)
6942 for (i
= 0; i
< host
->n_ports
; i
++) {
6943 struct ata_port
*ap
= host
->ports
[i
];
6945 if (!host
->ops
&& !ata_port_is_dummy(ap
))
6946 host
->ops
= ap
->ops
;
6948 if (ap
->ops
->port_stop
)
6952 if (host
->ops
->host_stop
)
6956 start_dr
= devres_alloc(ata_host_stop
, 0, GFP_KERNEL
);
6961 for (i
= 0; i
< host
->n_ports
; i
++) {
6962 struct ata_port
*ap
= host
->ports
[i
];
6964 if (ap
->ops
->port_start
) {
6965 rc
= ap
->ops
->port_start(ap
);
6967 ata_port_printk(ap
, KERN_ERR
, "failed to "
6968 "start port (errno=%d)\n", rc
);
6973 ata_eh_freeze_port(ap
);
6977 devres_add(host
->dev
, start_dr
);
6978 host
->flags
|= ATA_HOST_STARTED
;
6983 struct ata_port
*ap
= host
->ports
[i
];
6985 if (ap
->ops
->port_stop
)
6986 ap
->ops
->port_stop(ap
);
6988 devres_free(start_dr
);
6993 * ata_sas_host_init - Initialize a host struct
6994 * @host: host to initialize
6995 * @dev: device host is attached to
6996 * @flags: host flags
7000 * PCI/etc. bus probe sem.
7003 /* KILLME - the only user left is ipr */
7004 void ata_host_init(struct ata_host
*host
, struct device
*dev
,
7005 unsigned long flags
, const struct ata_port_operations
*ops
)
7007 spin_lock_init(&host
->lock
);
7009 host
->flags
= flags
;
7014 * ata_host_register - register initialized ATA host
7015 * @host: ATA host to register
7016 * @sht: template for SCSI host
7018 * Register initialized ATA host. @host is allocated using
7019 * ata_host_alloc() and fully initialized by LLD. This function
7020 * starts ports, registers @host with ATA and SCSI layers and
7021 * probe registered devices.
7024 * Inherited from calling layer (may sleep).
7027 * 0 on success, -errno otherwise.
7029 int ata_host_register(struct ata_host
*host
, struct scsi_host_template
*sht
)
	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
		ata_lpm_schedule(ap, ap->pm_policy);
	}

	return 0;
}
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	An invalid IRQ skips the IRQ registration and expects the host to
 *	have set polling mode on the port.  In this case, @irq_handler
 *	should be NULL.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
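/*
 * Illustrative sketch, not part of the original source: a minimal PCI
 * LLD probe routine wiring the helpers above together.  The foo_*
 * names, foo_port_info and foo_sht are hypothetical placeholders; a
 * real driver also maps its BARs and fills each port's ioaddr before
 * activating the host.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		(map BARs and initialize host->ports[i]->ioaddr here)
 *
 *		pci_set_master(pdev);
 *		return ata_host_activate(host, pdev->irq, ata_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 */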
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
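/*
 * Illustrative sketch, not part of the original source: a legacy-style
 * LLD typically points cmd_addr/ctl_addr at its mapped taskfile and
 * device-control blocks and lets ata_std_ports() derive the rest; the
 * "iomap" array below is a hypothetical placeholder.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = iomap[0];
 *	ioaddr->altstatus_addr =
 *	ioaddr->ctl_addr = iomap[1];
 *	ata_std_ports(ioaddr);
 */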
#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
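/*
 * Illustrative sketch, not part of the original source: PCI IDE LLDs
 * commonly use pci_test_config_bits() to check whether a channel is
 * enabled in config space before touching it.  The register offset,
 * mask and value below are made up for illustration only.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// port 0: reg, width, mask, val
 *		{ 0x43, 1, 0x80, 0x80 },	// port 1
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *		return -ENOENT;			// channel disabled
 */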
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
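/*
 * Illustrative sketch, not part of the original source: a PCI LLD that
 * needs no controller-specific resume work can point its pci_driver at
 * the generic helpers above; the foo_* names are placeholders.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */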
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;

	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
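/*
 * Illustrative sketch, not part of the original source: ata_ratelimit()
 * is intended to throttle messages emitted from hot paths such as
 * interrupt handlers; the message text and "stat" variable below are
 * hypothetical.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (stat 0x%x)\n", stat);
 */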
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
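/*
 * Illustrative sketch, not part of the original source: a caller waits
 * for a busy bit to clear by passing the same bit as @mask and @val.
 * FOO_STATUS/FOO_BUSY and "mmio" are hypothetical placeholders; this
 * polls every 10 ms for up to 1000 ms.
 *
 *	u32 status = ata_wait_register(mmio + FOO_STATUS, FOO_BUSY, FOO_BUSY,
 *				       10, 1000);
 *	if (status & FOO_BUSY)
 *		return -EBUSY;		// still busy after timeout
 */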
/* no-op and error-returning stubs used to fill ata_dummy_port_ops */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return 0;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
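/*
 * Illustrative sketch, not part of the original source: an LLD with a
 * vacant or disabled channel can pass ata_dummy_port_info for that slot
 * so the port is registered but never driven; foo_port_info is a
 * hypothetical placeholder.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */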
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);