// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * libata-core.c - helper library for ATA
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 * Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <asm/setup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};
EXPORT_SYMBOL_GPL(sata_port_ops);
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id = ATOMIC_INIT(0);
#ifdef CONFIG_ATA_FORCE
struct ata_force_param {
	const char	*name;
	u8		cbl;
	u8		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	u16		lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
#endif
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static inline bool ata_dev_print_info(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.flags & ATA_EHI_PRINTINFO;
}

static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			fallthrough;
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_link_next);
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
EXPORT_SYMBOL_GPL(ata_dev_next);
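
/*
 * Illustrative sketch (not part of the original file): walking every
 * enabled device behind a port with the two iterators above.  In-tree
 * code normally uses the ata_for_each_link()/ata_for_each_dev() wrappers
 * from <linux/libata.h>, which expand to these helpers.  The function
 * name is hypothetical.
 */
static inline unsigned int ata_example_count_enabled_devices(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned int n = 0;

	for (link = ata_link_next(NULL, ap, ATA_LITER_EDGE); link;
	     link = ata_link_next(link, ap, ATA_LITER_EDGE))
		for (dev = ata_dev_next(NULL, link, ATA_DITER_ENABLED); dev;
		     dev = ata_dev_next(dev, link, ATA_DITER_ENABLED))
			n++;

	return n;
}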
/**
 *	ata_dev_phys_link - find physical link for a device
 *	@dev: ATA device to look up physical link for
 *
 *	Look up physical link which @dev is attached to.  Note that
 *	this is different from @dev->link only when @dev is on slave
 *	link.  For all other cases, it's the same as @dev->link.
 *
 *	LOCKING:
 *	Don't care.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}
#ifdef CONFIG_ATA_FORCE
/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
#else
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_horkage(struct ata_device *dev) { }
#endif
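
/*
 * Example (illustrative): these handlers implement boot-time parameters
 * such as
 *
 *	libata.force=1.00:udma4
 *
 * which, per the rules described above, limits the first device of the
 * first fan-out link on port 1 to UDMA4.  The full syntax is documented
 * in Documentation/admin-guide/kernel-parameters.rst.
 */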
/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		fallthrough;
	default:
		return ATAPI_MISC;
	}
}
EXPORT_SYMBOL_GPL(atapi_cmd_type);
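
/*
 * Illustrative sketch (not part of the original file): a caller can use
 * atapi_cmd_type() to tell media-access packet commands apart from
 * everything else.  The helper name is hypothetical.
 */
static inline bool ata_example_is_media_access(u8 scsi_opcode)
{
	int type = atapi_cmd_type(scsi_opcode);

	return type == ATAPI_READ || type == ATAPI_WRITE ||
	       type == ATAPI_READ_CD;
}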
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;
	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
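
/*
 * Worked example (illustrative): for an LBA48 FUA write taking the DMA
 * path, the DMA block of ata_rw_cmds[] starts at index 16, so the lookup
 * is ata_rw_cmds[16 + 4 + 2 + 1], i.e. ATA_CMD_WRITE_FUA_EXT.
 */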
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
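
/*
 * Worked example (illustrative): with dev->heads == 16 and
 * dev->sectors == 63, a CHS address of cyl 1/head 0/sect 1 decodes to
 * block = (1 * 16 + 0) * 63 + 1 - 1 = 1008.
 */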
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *	@class: IO priority class
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag, int class)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE &&
		    class == IOPRIO_CLASS_RT)
			tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
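
/*
 * Round-trip sketch (illustrative, not part of the original file): the
 * two helpers above are exact inverses for in-range masks.  The function
 * name is hypothetical.
 */
static inline void ata_example_xfermask_roundtrip(void)
{
	unsigned long xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07 and udma == 0x3f again */
}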
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
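
/*
 * Illustrative sketch (not part of the original file): converting
 * between the XFER_* mode and xfer_mask views with the helpers above.
 * The function name is hypothetical.
 */
static inline void ata_example_xfer_conversions(void)
{
	u8 mode = ata_xfer_mask2mode(ATA_MASK_UDMA);	/* highest UDMA bit */
	unsigned long mask = ata_xfer_mode2mask(XFER_UDMA_5);	/* UDMA0-5 */
	int shift = ata_xfer_mode2shift(XFER_MW_DMA_0);	/* ATA_SHIFT_MWDMA */

	pr_debug("mode 0x%02x mask 0x%lx shift %d\n", mode, mask, shift);
}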
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @modemask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
EXPORT_SYMBOL_GPL(ata_mode_string);

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
	 * spec has never mentioned about using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
EXPORT_SYMBOL_GPL(ata_dev_classify);
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
EXPORT_SYMBOL_GPL(ata_id_string);

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
EXPORT_SYMBOL_GPL(ata_id_c_string);
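
/*
 * Illustrative sketch (not part of the original file): extracting the
 * model and firmware revision strings from IDENTIFY data, as
 * ata_dev_configure() does further below.  The function name is
 * hypothetical.
 */
static inline void ata_example_id_strings(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];
	unsigned char fwrev[ATA_ID_FW_REV_LEN + 1];

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
	pr_debug("model '%s' fwrev '%s'\n", model, fwrev);
}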
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}
u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
ata_tf_to_lba(const struct ata_taskfile
*tf
)
1143 sectors
|= (tf
->device
& 0x0f) << 24;
1144 sectors
|= (tf
->lbah
& 0xff) << 16;
1145 sectors
|= (tf
->lbam
& 0xff) << 8;
1146 sectors
|= (tf
->lbal
& 0xff);
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
/**
 *	ata_hpa_resize	-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	bool print_info = ata_dev_print_info(dev);
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  53==0x%04x  63==0x%04x  64==0x%04x  75==0x%04x\n",
		id[49], id[53], id[63], id[64], id[75]);
	DPRINTK("80==0x%04x  81==0x%04x  82==0x%04x  83==0x%04x  84==0x%04x\n",
		id[80], id[81], id[82], id[83], id[84]);
	DPRINTK("88==0x%04x  93==0x%04x\n", id[88], id[93]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
EXPORT_SYMBOL_GPL(ata_id_xfermask);
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int preempted_tag;
	u32 preempted_sactive;
	u64 preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */
	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

	qc->tag = ATA_TAG_INTERNAL;
	qc->hw_tag = 0;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.command |= ATA_SENSE;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}
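
/*
 * Illustrative sketch (not part of the original file): a minimal no-data
 * internal command in the style of ata_read_native_max_address() above.
 * The helper name is hypothetical; on return the result taskfile has
 * been copied back into @tf by ata_exec_internal().
 */
static inline unsigned int ata_example_check_power_mode(struct ata_device *dev,
							struct ata_taskfile *tf)
{
	ata_tf_init(dev, tf);
	tf->command = ATA_CMD_CHK_POWER;
	tf->flags |= ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	/* on success, tf->nsect holds the current power mode */
	return ata_exec_internal(dev, tf, NULL, DMA_NONE, NULL, 0, 0);
}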
/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
/**
 *	ata_do_dev_read_id		-	default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		fallthrough;
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@log: log to read
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);

	/*
	 * Return error without actually issuing the command on controllers
	 * which e.g. lockup on a read log page.
	 */
	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
		return AC_ERR_DEV;

retry:
	ata_tf_init(dev, &tf);
	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
		dma = true;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
		dma = false;
	}
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	if (err_mask && dma) {
		dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
		goto retry;
	}

	if (err_mask)
		ata_dev_err(dev, "Read log page 0x%02x failed, Emask 0x%x\n",
			    (unsigned int)page, err_mask);

	return err_mask;
}
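
/*
 * Illustrative sketch (not part of the original file): reading the SATA
 * Settings page of the Identify Device Data log into the port's scratch
 * buffer, the same pattern the ata_dev_config_*() helpers below use.
 * The function name is hypothetical.
 */
static inline bool ata_example_read_sata_settings(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	return ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				 ATA_LOG_SATA_SETTINGS,
				 ap->sector_buf, 1) == 0;
}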
static bool ata_log_supported(struct ata_device *dev, u8 log)
{
	struct ata_port *ap = dev->link->ap;

	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
		return false;
	return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
}
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err, i;

	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
		ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
		return false;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
	 * supported.
	 */
	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
				1);
	if (err)
		return false;

	for (i = 0; i < ap->sector_buf[8]; i++) {
		if (ap->sector_buf[9 + i] == page)
			return true;
	}

	return false;
}
static int ata_do_link_spd_horkage(struct ata_device *dev)
{
	struct ata_link *plink = ata_dev_phys_link(dev);
	u32 target, target_limit;

	if (!sata_scr_valid(plink))
		return 0;

	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
		target = 1;
	else
		return 0;

	target_limit = (1 << target) - 1;

	/* if already on stricter limit, no need to push further */
	if (plink->sata_spd_limit <= target_limit)
		return 0;

	plink->sata_spd_limit = target_limit;

	/* Request another EH round by returning -EAGAIN if link is
	 * going faster than the target speed.  Forward progress is
	 * guaranteed by setting sata_spd_limit to target_limit above.
	 */
	if (plink->sata_spd > target) {
		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
			     sata_spd_string(target));
		return -EAGAIN;
	}
	return 0;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
		return 0;

	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;

	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
		return;
	}
	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
				     0, ap->sector_buf, 1);
	if (!err_mask) {
		u8 *cmds = dev->ncq_send_recv_cmds;

		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);

		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
			ata_dev_dbg(dev, "disabling queued TRIM support\n");
			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
		}
	}
}
static void ata_dev_config_ncq_non_data(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;

	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
		ata_dev_warn(dev,
			     "NCQ Non-Data Log not supported\n");
		return;
	}
	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
				     0, ap->sector_buf, 1);
	if (!err_mask) {
		u8 *cmds = dev->ncq_non_data_cmds;

		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
	}
}
static void ata_dev_config_ncq_prio(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;

	err_mask = ata_read_log_page(dev,
				     ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_SATA_SETTINGS,
				     ap->sector_buf,
				     1);
	if (err_mask)
		goto not_supported;

	if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
		goto not_supported;

	dev->flags |= ATA_DFLAG_NCQ_PRIO;

	return;

not_supported:
	dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
	dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
}
static bool ata_dev_check_adapter(struct ata_device *dev,
				  unsigned short vendor_id)
{
	struct pci_dev *pcidev = NULL;
	struct device *parent_dev = NULL;

	for (parent_dev = dev->tdev.parent; parent_dev != NULL;
	     parent_dev = parent_dev->parent) {
		if (dev_is_pci(parent_dev)) {
			pcidev = to_pci_dev(parent_dev);
			if (pcidev->vendor == vendor_id)
				return true;
			break;
		}
	}

	return false;
}
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (!IS_ENABLED(CONFIG_SATA_HOST))
		return 0;
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}

	if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}

	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);

	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
		if (ata_id_has_ncq_send_and_recv(dev->id))
			ata_dev_config_ncq_send_recv(dev);
		if (ata_id_has_ncq_non_data(dev->id))
			ata_dev_config_ncq_non_data(dev);
		if (ata_id_has_ncq_prio(dev->id))
			ata_dev_config_ncq_prio(dev);
	}

	return 0;
}
static void ata_dev_config_sense_reporting(struct ata_device *dev)
{
	unsigned int err_mask;

	if (!ata_id_has_sense_reporting(dev->id))
		return;

	if (ata_id_sense_reporting_enabled(dev->id))
		return;

	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
	if (err_mask) {
		ata_dev_dbg(dev,
			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
			    err_mask);
	}
}
static void ata_dev_config_zac(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u8 *identify_buf = ap->sector_buf;

	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the 'ZAC' flag for Host-managed devices.
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/*
		 * Check for host-aware devices.
		 */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
		ata_dev_warn(dev,
			     "ATA Zoned Information Log not supported\n");
		return;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_ZONED_INFORMATION,
				     identify_buf, 1);
	if (!err_mask) {
		u64 zoned_cap, opt_open, opt_nonseq, max_open;

		zoned_cap = get_unaligned_le64(&identify_buf[8]);
		if ((zoned_cap >> 63))
			dev->zac_zoned_cap = (zoned_cap & 1);
		opt_open = get_unaligned_le64(&identify_buf[24]);
		if ((opt_open >> 63))
			dev->zac_zones_optimal_open = (u32)opt_open;
		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
		if ((opt_nonseq >> 63))
			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
		max_open = get_unaligned_le64(&identify_buf[40]);
		if ((max_open >> 63))
			dev->zac_zones_max_open = (u32)max_open;
	}
}
static void ata_dev_config_trusted(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	u64 trusted_cap;
	unsigned int err;

	if (!ata_id_has_trusted(dev->id))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
		ata_dev_warn(dev,
			     "Security Log not supported\n");
		return;
	}

	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
			ap->sector_buf, 1);
	if (err)
		return;

	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
	if (!(trusted_cap & (1ULL << 63))) {
		ata_dev_dbg(dev,
			    "Trusted Computing capability qword not valid!\n");
		return;
	}

	if (trusted_cap & (1 << 0))
		dev->flags |= ATA_DFLAG_TRUSTED;
}
static int ata_dev_config_lba(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	const u16 *id = dev->id;
	const char *lba_desc;
	char ncq_desc[24];
	int ret;

	dev->flags |= ATA_DFLAG_LBA;

	if (ata_id_has_lba48(id)) {
		lba_desc = "LBA48";
		dev->flags |= ATA_DFLAG_LBA48;
		if (dev->n_sectors >= (1UL << 28) &&
		    ata_id_has_flush_ext(id))
			dev->flags |= ATA_DFLAG_FLUSH_EXT;
	} else {
		lba_desc = "LBA";
	}

	/* config NCQ */
	ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

	/* print device info to dmesg */
	if (ata_msg_drv(ap) && ata_dev_print_info(dev))
		ata_dev_info(dev,
			     "%llu sectors, multi %u: %s %s\n",
			     (unsigned long long)dev->n_sectors,
			     dev->multi_count, lba_desc, ncq_desc);

	return ret;
}
static void ata_dev_config_chs(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	const u16 *id = dev->id;

	if (ata_id_current_chs_valid(id)) {
		/* Current CHS translation is valid. */
		dev->cylinders = id[54];
		dev->heads     = id[55];
		dev->sectors   = id[56];
	} else {
		/* Default translation */
		dev->cylinders	= id[1];
		dev->heads	= id[3];
		dev->sectors	= id[6];
	}

	/* print device info to dmesg */
	if (ata_msg_drv(ap) && ata_dev_print_info(dev))
		ata_dev_info(dev,
			     "%llu sectors, multi %u, CHS %u/%u/%u\n",
			     (unsigned long long)dev->n_sectors,
			     dev->multi_count, dev->cylinders,
			     dev->heads, dev->sectors);
}
static void ata_dev_config_devslp(struct ata_device *dev)
{
	u8 *sata_setting = dev->link->ap->sector_buf;
	unsigned int err_mask;
	int i, j;

	/*
	 * Check device sleep capability. Get DevSlp timing variables
	 * from SATA Settings page of Identify Device Data Log.
	 */
	if (!ata_id_has_devslp(dev->id))
		return;

	err_mask = ata_read_log_page(dev,
				     ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_SATA_SETTINGS,
				     sata_setting,
				     1);
	if (err_mask)
		return;

	dev->flags |= ATA_DFLAG_DEVSLP;
	for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
		j = ATA_LOG_DEVSLP_OFFSET + i;
		dev->devslp_timing[i] = sata_setting[j];
	}
}
static void ata_dev_print_features(struct ata_device *dev)
{
	if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
		return;

	ata_dev_info(dev,
		     "Features:%s%s%s%s%s\n",
		     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
		     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
		     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
		     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "");
}
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	bool print_info = ata_dev_print_info(dev);
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (ap->flags & ATA_FLAG_NO_LPM)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (dev->horkage & ATA_HORKAGE_NOLPM) {
		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "%s: %s, %s, max %s\n",
				     revbuf, modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask));

		if (ata_id_has_lba(id)) {
			rc = ata_dev_config_lba(dev);
			if (rc)
				return rc;
		} else {
			ata_dev_config_chs(dev);
		}

		ata_dev_config_devslp(dev);
		ata_dev_config_sense_reporting(dev);
		ata_dev_config_zac(dev);
		ata_dev_config_trusted(dev);
		dev->cdb_len = 32;

		if (ata_msg_drv(ap) && print_info)
			ata_dev_print_features(dev);
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
EXPORT_SYMBOL_GPL(ata_cable_40wire);

/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
EXPORT_SYMBOL_GPL(ata_cable_80wire);

/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
EXPORT_SYMBOL_GPL(ata_cable_unknown);

/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
EXPORT_SYMBOL_GPL(ata_cable_ignore);

/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
EXPORT_SYMBOL_GPL(ata_cable_sata);
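/*
 * Illustrative sketch (not part of this file): a PATA LLDD whose board
 * has no cable detect logic simply wires one of the helpers above into
 * its port operations; "my_pata_port_ops" is a hypothetical example.
 *
 *	static struct ata_port_operations my_pata_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */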
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		fallthrough;
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@link: SATA link to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_phys_link_online(link)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
			      sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
			      sstatus, scontrol);
	}
}
/**
 *	ata_dev_pair		-	return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_link *link = adev->link;
	struct ata_device *pair = &link->device[1 - adev->devno];

	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
EXPORT_SYMBOL_GPL(ata_dev_pair);
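/*
 * Illustrative sketch (not part of this file): cable-sharing checks such
 * as the CFA TrueIDE rule in ata_dev_xfermask() below use ata_dev_pair()
 * to ask whether the other device on the same PATA cable is enabled:
 *
 *	struct ata_device *pair = ata_dev_pair(dev);
 *
 *	if (pair) {
 *		// both master and slave are present; shared-cable
 *		// timing restrictions apply
 *	}
 */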
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/*
	 * Mask off all speeds higher than or equal to the current one.  At
	 * this point, if current SPD is not available and we previously
	 * recorded the link speed from SStatus, the driver has already
	 * masked off the highest bit so mask should already be 1 or 0.
	 * Otherwise, we should not force 1.5Gbps on a link where we have
	 * not previously recorded speed from SStatus.  Just return in this
	 * case.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		return -EINVAL;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
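/*
 * Illustrative sketch (not part of this file): sata_spd_limit is a bitmask
 * with bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps, bit 2 = 6.0 Gbps.  For a link
 * currently at 6.0 Gbps (spd == 3) with limit mask 0x7, the code above
 * first clears the highest bit (0x7 -> 0x3), then masks off all speeds at
 * or above the current one (0x3 & ((1 << 2) - 1) == 0x3), leaving 3.0 Gbps
 * as the new ceiling reported by sata_spd_string(fls(0x3)).
 */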
#ifdef CONFIG_ATA_ACPI
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
#endif
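/*
 * Illustrative sketch (not part of this file): assuming the standard
 * libata timing table, a 60 ns cycle corresponds to the UDMA/33 data
 * rate, so a caller converting ACPI-reported timings would expect:
 *
 *	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 60);
 *	// mode == XFER_UDMA_2 (UDMA/33); a cycle slower than the
 *	// slowest known UDMA mode would return 0xff instead
 */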
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		fallthrough;
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
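/*
 * Illustrative sketch (not part of this file): the probe fail path in
 * ata_bus_probe() above pairs this helper with a later ata_set_mode()
 * call; the same pattern applies anywhere a device must be slowed down:
 *
 *	ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);	// drop top PIO mode
 *	// ... then re-run the mode programming to apply the new limit
 */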
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
		ata_dev_info(dev, "configured for %s%s\n",
			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
			     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
EXPORT_SYMBOL_GPL(ata_do_set_mode);
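/*
 * Illustrative sketch (not part of this file): steps 2 and 3 above call
 * into the LLDD's ->set_piomode()/->set_dmamode() hooks with dev->pio_mode
 * or dev->dma_mode already chosen; "my_set_piomode" is a hypothetical
 * controller hook that only has to program its own timing registers.
 *
 *	static void my_set_piomode(struct ata_port *ap, struct ata_device *adev)
 *	{
 *		const struct ata_timing *t =
 *			ata_timing_find_mode(adev->pio_mode);
 *
 *		if (t) {
 *			// program chipset timing registers from *t here
 *		}
 *	}
 */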
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
		 * offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}

/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
				int (*check_ready)(struct ata_link *link))
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
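/*
 * Illustrative sketch (not part of this file): a hardreset implementation
 * typically passes a @check_ready that samples the port's status register
 * and feeds it to ata_check_ready(); "my_read_status" is a hypothetical
 * register accessor.
 *
 *	static int my_check_ready(struct ata_link *link)
 *	{
 *		u8 status = my_read_status(link->ap);
 *
 *		return ata_check_ready(status);
 *	}
 *
 *	rc = ata_wait_after_reset(link, deadline, my_check_ready);
 */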
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				      "failed to resume link for reset (errno=%d)\n",
				      rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
EXPORT_SYMBOL_GPL(ata_std_prereset);

/**
 *	sata_std_hardreset - COMRESET w/o waiting or classification
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	Standard SATA COMRESET w/o waiting or classification.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 if link offline, -EAGAIN if link online, -errno on errors.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
	return online ? -EAGAIN : rc;
}
EXPORT_SYMBOL_GPL(sata_std_hardreset);

/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* reset complete, clear SError */
	if (!sata_scr_read(link, SCR_ERROR, &serror))
		sata_scr_write(link, SCR_ERROR, serror);

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}
EXPORT_SYMBOL_GPL(ata_std_postreset);
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_info(dev, "class mismatch %d != %d\n",
			     dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
			     model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
			     serial[0], serial[1]);
		return 0;
	}

	return 1;
}
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_ZAC &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
	 * unlocking HPA in those cases.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },

	/*
	 * Causes silent data corruption with higher max sects.
	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
	 */
	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },

	/*
	 * These devices time out with higher max sects.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
	 */
	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* drives which fail FPDMA_AA activation (some may freeze afterwards)
	   the ST disks also have LPM issues */
	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_HORKAGE_BROKEN_FPDMA_AA |
						ATA_HORKAGE_NOLPM, },
	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ, },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },

	/* Sandisk SD7/8/9s lock up hard on large trims */
	{ "SanDisk SD[789]*",	NULL,		ATA_HORKAGE_MAX_TRIM_128M, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX",		"1.30",		ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* Crucial BX100 SSD 500GB has broken LPM support */
	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },

	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },
	/* 512GB MX100 with newer firmware has only LPM issues */
	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },

	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },
	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },

	/* These specific Samsung models/firmware-revs do not handle LPM well */
	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },

	/* devices that don't properly handle queued TRIM commands */
	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 860*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NO_NCQ_ON_ATI, },
	{ "Samsung SSD 870*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NO_NCQ_ON_ATI, },
	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },

	/* devices that don't properly handle TRIM commands */
	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },

	/*
	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
	 * (Return Zero After Trim) flags in the ATA Command Set are
	 * unreliable in the sense that they only define what happens if
	 * the device successfully executed the DSM TRIM command.  TRIM
	 * is only advisory, however, and the device is free to silently
	 * ignore all or parts of the request.
	 *
	 * Whitelist drives that are known to reliably return zeroes
	 * after TRIM.
	 */

	/*
	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
	 * that model before whitelisting all other intel SSDs.
	 */
	{ "INTEL*SSDSC2MH*",		NULL,	0, },

	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "INTEL*SSD*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },

	/*
	 * Some WD SATA-I drives spin up and down erratically when the link
	 * is put into the slumber mode.  We don't have full list of the
	 * affected devices.  Disable LPM if the device matches one of the
	 * known prefixes and is SATA-1.  As a side effect LPM partial is
	 * allowed.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
	 */
	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },

	/* End Marker */
	{ }
};
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (glob_match(ad->model_num, model_num)) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (glob_match(ad->model_rev, model_rev))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}

static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
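/*
 * Illustrative sketch (not part of this file): glob_match() gives the
 * table above shell-style wildcards, so a single entry can cover several
 * firmware spins or models.  For example the pattern "ST3320[68]13AS"
 * matches both "ST3320613AS" and "ST3320813AS", and "Maxtor *" with
 * model_rev "BANC*" matches any Maxtor model whose firmware revision
 * begins with "BANC".
 */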
/**
 *	ata_is_40wire		-	check drive side detection
 *	@dev: device
 *
 *	Perform drive side detection decoding, allowing for device vendors
 *	who can't follow the documentation.
 */
static int ata_is_40wire(struct ata_device *dev)
{
	if (dev->horkage & ATA_HORKAGE_IVB)
		return ata_drive_40wire_relaxed(dev->id);
	return ata_drive_40wire(dev->id);
}

/**
 *	cable_is_40wire		-	40/80/SATA decider
 *	@ap: port to consider
 *
 *	This function encapsulates the policy for speed management
 *	in one place. At the moment we don't cache the result but
 *	there is a good case for setting ap->cbl to the result when
 *	we are called with unknown cables (and figuring out if it
 *	impacts hotplug at all).
 *
 *	Return 1 if the cable appears to be 40 wire.
 */
static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* If the controller thinks we are 40 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;

	/* If the controller thinks we are 80 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;

	/* If the system is known to be 40 wire short cable (eg
	 * laptop), then we allow 80 wire modes even if the drive
	 * isn't sure.
	 */
	if (ap->cbl == ATA_CBL_PATA40_SHORT)
		return 0;

	/* If the controller doesn't know, we scan.
	 *
	 * Note: We look for all 40 wire detects at this point.  Any
	 * 80 wire detect is taken to be 80 wire cable because
	 * - in many setups only the one drive (slave if present) will
	 *   give a valid detect
	 * - if you have a non detect capable drive you don't want it
	 *   to colour the choice
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	/* On some disks, this command causes spin-up, so we need longer timeout */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;
	unsigned long timeout = 0;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	if (enable == SETFEATURES_SPINUP)
		timeout = ata_probe_timeout ?
			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
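/*
 * Illustrative sketch (not part of this file): ata_dev_configure() above
 * uses exactly this helper to turn on ATAPI Asynchronous Notification,
 * passing the feature code in the sector count as described:
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
 *	if (err_mask)
 *		ata_dev_err(dev, "failed to enable ATAPI AN (err_mask=0x%x)\n",
 *			    err_mask);
 */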
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	atapi_check_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 nonzero otherwise
 */
int atapi_check_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
	    unlikely(qc->nbytes & 15))
		return 1;

	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}
/**
 *	ata_std_qc_defer - Check whether a qc needs to be deferred
 *	@qc: ATA command in question
 *
 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As upper layer only knows the queue depth, we are
 *	responsible for maintaining exclusion.  This function checks
 *	whether a new command @qc can be issued.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (ata_is_ncq(qc->tf.protocol)) {
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}
EXPORT_SYMBOL_GPL(ata_std_qc_defer);

enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
	return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}
#ifdef CONFIG_HAS_DMA

/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}

/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	VPRINTK("%d sg elements mapped\n", n_elem);
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}

#else /* !CONFIG_HAS_DMA */

static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }

#endif /* !CONFIG_HAS_DMA */
/**
 * swap_buf_le16 - swap halves of 16-bit words in place
 * @buf: Buffer to swap
 * @buf_words: Number of 16-bit words in buffer.
 *
 * Swap halves of 16-bit words if needed to convert from
 * little-endian byte order to native cpu byte order, or
 * vice-versa.
 *
 * LOCKING:
 * Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
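
/*
 * Illustrative use: IDENTIFY DEVICE data is little-endian on the wire,
 * so a caller holding the raw 256-word page converts it in place before
 * consulting the u16 fields.
 *
 *	swap_buf_le16(dev->id, ATA_ID_WORDS);
 */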
/**
 * ata_qc_new_init - Request an available ATA command, and initialize it
 * @dev: Device from whom we request an available command structure
 * @tag: tag
 *
 * LOCKING:
 * None.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* libsas case */
	if (ap->flags & ATA_FLAG_SAS_HOST) {
		tag = ata_sas_allocate_tag(ap);
		if (tag < 0)
			return NULL;
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = qc->hw_tag = tag;
	qc->ap = ap;
	qc->dev = dev;

	ata_qc_reinit(qc);

	return qc;
}
/**
 * ata_qc_free - free unused ata_queued_cmd
 * @qc: Command to complete
 *
 * Designed to free unused ata_queued_cmd object
 * in case something prevents using it.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	ap = qc->ap;

	qc->flags = 0;
	tag = qc->tag;
	if (ata_tag_valid(tag)) {
		qc->tag = ATA_TAG_POISON;
		if (ap->flags & ATA_FLAG_SAS_HOST)
			ata_sas_free_tag(tag, ap);
	}
}
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (ata_is_ncq(qc->tf.protocol)) {
		link->sactive &= ~(1 << qc->hw_tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1ULL << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->qc_fill_rtf(qc);
}
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;

	if (!ata_is_data(qc->tf.protocol))
		return;

	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
		return;

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}
/**
 * ata_qc_complete - Complete an active ATA command
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed, with either an ok or not-ok status.
 *
 * Refrain from calling this function multiple times when
 * successfully completing multiple NCQ commands.
 * ata_qc_complete_multiple() should be used instead, which will
 * properly update IRQ expect state.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Trigger the LED (if available) */
	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/*
		 * Finish internal commands without any further processing
		 * and always with the result TF filled.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			trace_ata_qc_complete_internal(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			trace_ata_qc_complete_failed(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		trace_ata_qc_complete_done(qc);
		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF &&
			    qc->tf.feature != SETFEATURES_RA_ON &&
			    qc->tf.feature != SETFEATURES_RA_OFF)
				break;
			fallthrough;
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
EXPORT_SYMBOL_GPL(ata_qc_complete);
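
/*
 * Illustrative sketch of the completion path (hypothetical LLD code):
 * an interrupt handler that has determined the active qc finished
 * records any error mask and hands the qc back to the core; "status"
 * is an invented name for a device status read.
 *
 *	qc = ata_qc_from_tag(ap, link->active_tag);
 *	if (qc) {
 *		qc->err_mask |= ac_err_mask(status);
 *		ata_qc_complete(qc);
 *	}
 */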
/**
 * ata_qc_get_active - get bitmask of active qcs
 * @ap: port in question
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Bitmask of active qcs
 */
u64 ata_qc_get_active(struct ata_port *ap)
{
	u64 qc_active = ap->qc_active;

	/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		qc_active |= (1 << 0);
		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
	}

	return qc_active;
}
EXPORT_SYMBOL_GPL(ata_qc_get_active);
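
/*
 * Illustrative sketch (modelled on NCQ-capable LLDs): a controller
 * that reports completions through a "done" bitmask read from hardware
 * (hypothetical "done_mask") completes everything that is active but
 * no longer pending in one call.
 *
 *	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 */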
/**
 * ata_qc_issue - issue taskfile to device
 * @qc: command to issue to device
 *
 * Prepare an ATA command for submission to device.
 * This includes mapping the data into a DMA-able
 * area, filling in the S/G table, and finally
 * writing the taskfile to hardware, starting the command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->hw_tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1ULL << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
		goto sys_err;

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	qc->err_mask |= ap->ops->qc_prep(qc);
	if (unlikely(qc->err_mask))
		goto err;
	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
/**
 * ata_phys_link_online - test whether the given link is online
 * @link: ATA link to test
 *
 * Test whether @link is online.  Note that this function returns
 * 0 if online status of @link cannot be obtained, so
 * ata_link_online(link) != !ata_link_offline(link).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * True if the port online status is available and online.
 */
bool ata_phys_link_online(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    ata_sstatus_online(sstatus))
		return true;
	return false;
}
/**
 * ata_phys_link_offline - test whether the given link is offline
 * @link: ATA link to test
 *
 * Test whether @link is offline.  Note that this function
 * returns 0 if offline status of @link cannot be obtained, so
 * ata_link_online(link) != !ata_link_offline(link).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * True if the port offline status is available and offline.
 */
bool ata_phys_link_offline(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    !ata_sstatus_online(sstatus))
		return true;
	return false;
}
/**
 * ata_link_online - test whether the given link is online
 * @link: ATA link to test
 *
 * Test whether @link is online.  This is identical to
 * ata_phys_link_online() when there's no slave link.  When
 * there's a slave link, this function should only be called on
 * the master link and will return true if any of M/S links is
 * online.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * True if the port online status is available and online.
 */
bool ata_link_online(struct ata_link *link)
{
	struct ata_link *slave = link->ap->slave_link;

	WARN_ON(link == slave);	/* shouldn't be called on slave link */

	return ata_phys_link_online(link) ||
		(slave && ata_phys_link_online(slave));
}
EXPORT_SYMBOL_GPL(ata_link_online);
/**
 * ata_link_offline - test whether the given link is offline
 * @link: ATA link to test
 *
 * Test whether @link is offline.  This is identical to
 * ata_phys_link_offline() when there's no slave link.  When
 * there's a slave link, this function should only be called on
 * the master link and will return true if both M/S links are
 * offline.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * True if the port offline status is available and offline.
 */
bool ata_link_offline(struct ata_link *link)
{
	struct ata_link *slave = link->ap->slave_link;

	WARN_ON(link == slave);	/* shouldn't be called on slave link */

	return ata_phys_link_offline(link) &&
		(!slave || ata_phys_link_offline(slave));
}
EXPORT_SYMBOL_GPL(ata_link_offline);
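
/*
 * Illustrative use (hypothetical caller): reset and revalidation paths
 * typically gate further work on the link state reported above.
 *
 *	if (ata_link_online(link))
 *		ata_link_info(link, "link up after reset\n");
 *	else if (ata_link_offline(link))
 *		ata_link_info(link, "link down, skipping revalidation\n");
 */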
#ifdef CONFIG_PM
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	/* Previous resume operation might still be in
	 * progress.  Wait for PM_PENDING to clear.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM ops to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}
}
/*
 * On some hardware, a device fails to respond after being spun down
 * for suspend.  As the device won't be used before being resumed, we
 * don't need to touch the device.  Ask EH to skip the usual stuff and
 * proceed directly to suspend.
 *
 * http://thread.gmane.org/gmane.linux.ide/46764
 */
static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
						 | ATA_EHI_NO_AUTOPSY
						 | ATA_EHI_NO_RECOVERY;

static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}

static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
static int ata_port_pm_suspend(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	ata_port_suspend(ap, PMSG_SUSPEND);
	return 0;
}

static int ata_port_pm_freeze(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	ata_port_suspend(ap, PMSG_FREEZE);
	return 0;
}

static int ata_port_pm_poweroff(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
	return 0;
}
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;

static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}

static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}

static int ata_port_pm_resume(struct device *dev)
{
	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
/*
 * For ODDs, the upper layer will poll for media change every few seconds,
 * which will make it enter and leave suspend state every few seconds. And
 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
 * is very little and the ODD may malfunction after constantly being reset.
 * So the idle callback here will not proceed to suspend if a non-ZPODD
 * capable ODD is attached to the port.
 */
static int ata_port_runtime_idle(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);
	struct ata_link *link;
	struct ata_device *adev;

	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(adev, link, ENABLED)
			if (adev->class == ATA_DEV_ATAPI &&
			    !zpodd_dev_enabled(adev))
				return -EBUSY;
	}

	return 0;
}

static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}

static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_pm_suspend,
	.resume = ata_port_pm_resume,
	.freeze = ata_port_pm_freeze,
	.thaw = ata_port_pm_resume,
	.poweroff = ata_port_pm_poweroff,
	.restore = ata_port_pm_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
/* sas ports don't participate in pm runtime management of ata_ports,
 * and need to resume ata devices at the domain level, not the per-port
 * level. sas suspend/resume is async to allow parallel port recovery
 * since sas has multiple ata_port instances per Scsi_Host.
 */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);

void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
/**
 * ata_host_suspend - suspend host
 * @host: host to suspend
 * @mesg: PM message
 *
 * Suspend @host.  Actual operation is performed by port suspend.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_host_suspend);

/**
 * ata_host_resume - resume host
 * @host: host to resume
 *
 * Resume @host.  Actual operation is performed by port resume.
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
const struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
/**
 * ata_dev_init - Initialize an ata_device structure
 * @dev: Device structure to initialize
 *
 * Initialize @dev in preparation for probing.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
/**
 * ata_link_init - Initialize an ata_link structure
 * @ap: ATA port link is attached to
 * @link: Link structure to initialize
 * @pmp: Port multiplier port number
 *
 * Initialize @link.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
/**
 * sata_link_init_spd - Initialize link->sata_spd_limit
 * @link: Link to configure sata_spd_limit for
 *
 * Initialize ``link->[hw_]sata_spd_limit`` to the currently
 * configured value.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
/**
 * ata_port_alloc - allocate and initialize basic ATA port resources
 * @host: ATA host this allocated port belongs to
 *
 * Allocate and initialize basic ATA port resources.
 *
 * RETURNS:
 * Allocated ATA port on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
		    TIMER_DEFERRABLE);

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
static void ata_devres_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);
	}

	dev_set_drvdata(gendev, NULL);
	ata_host_put(host);
}
static void ata_host_release(struct kref *kref)
{
	struct ata_host *host = container_of(kref, struct ata_host, kref);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}
	kfree(host);
}

void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}

void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release);
}
EXPORT_SYMBOL_GPL(ata_host_put);
/**
 * ata_host_alloc - allocate and init basic ATA host resources
 * @dev: generic device this host is associated with
 * @max_ports: maximum number of ATA ports associated with this host
 *
 * Allocate and initialize basic ATA host resources.  An LLD calls
 * this function to allocate a host, then initializes it fully and
 * attaches it using ata_host_register().
 *
 * @max_ports ports are allocated and host->n_ports is
 * initialized to @max_ports.  The caller is allowed to decrease
 * host->n_ports before calling ata_host_register().  The unused
 * ports will be automatically freed on registration.
 *
 * RETURNS:
 * Allocated ATA host on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;
	void *dr;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = kzalloc(sz, GFP_KERNEL);
	if (!host)
		return NULL;

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		goto err_free;

	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
	if (!dr)
		goto err_out;

	devres_add(dev, dr);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;
	kref_init(&host->kref);

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
 err_free:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
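
/*
 * Illustrative sketch of the allocation flow (hypothetical LLD probe,
 * names invented): a driver that knows its port count up front
 * allocates the host and fills in each port before starting it.
 *
 *	host = ata_host_alloc(&pdev->dev, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	for (i = 0; i < host->n_ports; i++)
 *		host->ports[i]->ops = &my_port_ops;
 */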
/**
 * ata_host_alloc_pinfo - alloc host and init with port_info array
 * @dev: generic device this host is associated with
 * @ppi: array of ATA port_info to initialize host with
 * @n_ports: number of ATA ports attached to this host
 *
 * Allocate ATA host and initialize with info from @ppi.  If NULL
 * terminated, @ppi may contain fewer entries than @n_ports.  The
 * last entry will be used for the remaining ports.
 *
 * RETURNS:
 * Allocated ATA host on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
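
/*
 * Illustrative sketch (hypothetical LLD): a single port_info can fan
 * out to all ports because @ppi may be NULL-terminated short;
 * "my_port_ops" is an invented name.
 *
 *	static const struct ata_port_info my_pi = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &my_pi, NULL };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */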
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
/**
 * ata_finalize_port_ops - finalize ata_port_operations
 * @ops: ata_port_operations to finalize
 *
 * An ata_port_operations can inherit from another ops and that
 * ops can again inherit from another.  This can go on as many
 * times as necessary as long as there is no loop in the
 * inheritance chain.
 *
 * Ops tables are finalized when the host is started.  NULL or
 * unspecified entries are inherited from the closest ancestor
 * which has the method and the entry is populated with it.
 * After finalization, the ops table directly points to all the
 * methods and ->inherits is no longer necessary and cleared.
 *
 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 * LOCKING:
 * None.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
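
/*
 * Illustrative sketch of the inheritance this function resolves
 * (hypothetical driver ops): unspecified methods come from the
 * ancestor chain at host start time, and ATA_OP_NULL pins a method
 * to NULL despite the ancestors; "my_hardreset" is an invented name.
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.softreset	= ATA_OP_NULL,
 *	};
 */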
/**
 * ata_host_start - start and freeze ports of an ATA host
 * @host: ATA host to start ports for
 *
 * Start and then freeze ports of @host.  Started status is
 * recorded in host->flags, so this function can be called
 * multiple times.  Ports are guaranteed to get started only
 * once.  If host->ops isn't initialized yet, it's set to the
 * first non-dummy port ops.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops && host->ops->host_stop)
		have_stop = 1;

	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_start);
/**
 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
 * @host: host to initialize
 * @dev: device host is attached to
 * @ops: port_ops
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->n_tags = ATA_MAX_QUEUE;
	host->dev = dev;
	host->ops = ops;
	kref_init(&host->kref);
}
EXPORT_SYMBOL_GPL(ata_host_init);
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
int ata_port_probe(struct ata_port *ap)
{
	int rc = 0;

	if (ap->ops->error_handler) {
		__ata_port_probe(ap);
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);
	}
	return rc;
}
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 * Jeff Garzik says this is only within a controller, so we
	 * don't need to wait for port 0, only for later ports.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* in order to keep device order, we need to synchronize at this point */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
/**
 * ata_host_register - register initialized ATA host
 * @host: ATA host to register
 * @sht: template for SCSI host
 *
 * Register initialized ATA host.  @host is allocated using
 * ata_host_alloc() and fully initialized by LLD.  This function
 * starts ports, registers @host with ATA and SCSI layers and
 * probes registered devices.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* Create associated sysfs transport objects */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev, host->ports[i]);
		if (rc)
			goto err_tadd;
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ap->cookie = async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	while (--i >= 0)
		ata_tport_delete(host->ports[i]);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_register);
/**
 * ata_host_activate - start host, request IRQ and register it
 * @host: target ATA host
 * @irq: IRQ to request
 * @irq_handler: irq_handler used when requesting IRQ
 * @irq_flags: irq_flags used when requesting IRQ
 * @sht: scsi_host_template to use when registering the host
 *
 * After allocating an ATA host and initializing it, most libata
 * LLDs perform three steps to activate the host - start host,
 * request IRQ and register it.  This helper takes necessary
 * arguments and performs the three steps in one go.
 *
 * An invalid IRQ skips the IRQ registration and expects the host to
 * have set polling mode on the port.  In this case, @irq_handler
 * should be NULL.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_activate);
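
/*
 * Illustrative probe tail (hypothetical PCI LLD, names invented):
 * after allocation and controller-specific init, one call starts the
 * ports, requests the IRQ and registers the host.
 *
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */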
/**
 * ata_port_detach - Detach ATA port in preparation of device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host.  @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	if (ap->pmp_link) {
		int i;

		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
/**
 * ata_host_detach - Detach all ports of an ATA host
 * @host: Host to detach
 *
 * Detach all ports of @host.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		/* Ensure ata_port probe has completed */
		async_synchronize_cookie(host->ports[i]->cookie + 1);
		ata_port_detach(host->ports[i]);
	}

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
EXPORT_SYMBOL_GPL(ata_host_detach);
#ifdef CONFIG_PCI

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_detach(host);
}
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ap->pflags |= ATA_PFLAG_FROZEN;

		/* Disable port interrupts */
		if (ap->ops->freeze)
			ap->ops->freeze(ap);

		/* Stop the port DMA engines */
		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(pci_test_config_bits);
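
/*
 * Illustrative use (hypothetical register and values): callers
 * describe one config-space field and the value that means "enabled".
 *
 *	static const struct pci_bits my_enable_bits =
 *		{ 0x41, 1, 0x80, 0x80 };	// reg, width, mask, val
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;			// port disabled
 */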
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */
/**
 * ata_platform_remove_one - Platform layer callback for device removal
 * @pdev: Platform device that was removed
 *
 * Platform layer indicates to libata via this hook that hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from platform layer (may sleep).
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);

	ata_host_detach(host);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_platform_remove_one);
#ifdef CONFIG_ATA_FORCE
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	static const struct ata_force_param force_tbl[] __initconst = {
		{ "40c",	.cbl		= ATA_CBL_PATA40 },
		{ "80c",	.cbl		= ATA_CBL_PATA80 },
		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
		{ "sata",	.cbl		= ATA_CBL_SATA },
		{ "1.5Gbps",	.spd_limit	= 1 },
		{ "3.0Gbps",	.spd_limit	= 2 },
		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
		{ "noncqati",	.horkage_on	= ATA_HORKAGE_NO_NCQ_ON_ATI },
		{ "ncqati",	.horkage_off	= ATA_HORKAGE_NO_NCQ_ON_ATI },
		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
	};
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == ',')
		*p++ = '\0';

	*cur = p;

	/* parse */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
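
/*
 * Example of what this parser consumes (kernel command line, value
 * names from force_tbl above):
 *
 *	libata.force=1.5Gbps			apply to all ports
 *	libata.force=1:noncq,2.00:disable	port 1: no NCQ; port 2
 *						device 0: disabled
 *
 * A bare "1.5" also works, since unambiguous prefix matches are
 * accepted by ata_parse_force_one().
 */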
static void ata_free_force_param(void)
{
	kfree(ata_force_tbl);
}
#else
static inline void ata_parse_force_param(void) { }
static inline void ata_free_force_param(void) { }
#endif /* CONFIG_ATA_FORCE */
static int __init ata_init(void)
{
	int rc;

	ata_parse_force_param();

	rc = ata_sff_init();
	if (rc) {
		ata_free_force_param();
		return rc;
	}

	libata_transport_init();
	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		ata_sff_exit();
		rc = -ENOMEM;
		goto err_out;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

err_out:
	return rc;
}

static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	ata_free_force_param();
}

subsys_initcall(ata_init);
module_exit(ata_exit);
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
EXPORT_SYMBOL_GPL(ata_ratelimit);
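
/*
 * Illustrative use: noisy per-interrupt diagnostics are typically
 * gated so a storm cannot flood the log (at most one burst per HZ/5
 * window given the state above).
 *
 *	if (ata_ratelimit())
 *		ata_port_warn(ap, "spurious interrupt\n");
 */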
/**
 * ata_msleep - ATA EH owner aware msleep
 * @ap: ATA port to attribute the sleep to
 * @msecs: duration to sleep in milliseconds
 *
 * Sleeps @msecs.  If the current task is owner of @ap's EH, the
 * ownership is released before going to sleep and reacquired
 * after the sleep is complete.  IOW, other ports sharing the
 * @ap->host will be allowed to own the EH while this task is
 * sleeping.
 *
 * LOCKING:
 * Might sleep.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
	bool owns_eh = ap && ap->host->eh_owner == current;

	if (owns_eh)
		ata_eh_release(ap);

	if (msecs < 20) {
		unsigned long usecs = msecs * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	} else {
		msleep(msecs);
	}

	if (owns_eh)
		ata_eh_acquire(ap);
}
EXPORT_SYMBOL_GPL(ata_msleep);
/**
 * ata_wait_register - wait until register value changes
 * @ap: ATA port to wait register for, can be NULL
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval: polling interval in milliseconds
 * @timeout: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers.  This function reads 32bit LE
 * IO-mapped register @reg and tests for the following condition.
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
EXPORT_SYMBOL_GPL(ata_wait_register);
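
/*
 * Illustrative use (hypothetical MMIO status register and bit names):
 * poll until an enable bit set by a preceding write actually reads
 * back, giving up after one second.
 *
 *	tmp = ata_wait_register(ap, mmio + MY_CTL_REG, MY_EN, 0, 10, 1000);
 *	if (!(tmp & MY_EN))
 *		return -EIO;
 */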
/*
 * Dummy port_ops
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}

struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
/*
 * Utility print functions
 */
void ata_port_printk(const struct ata_port *ap, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sata%u: %pV", level, ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_port_printk);

void ata_link_printk(const struct ata_link *link, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
		printk("%sata%u.%02u: %pV",
		       level, link->ap->print_id, link->pmp, &vaf);
	else
		printk("%sata%u: %pV",
		       level, link->ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_link_printk);

void ata_dev_printk(const struct ata_device *dev, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sata%u.%02u: %pV",
	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
	       &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_dev_printk);

void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);