/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include <trace/events/libata.h>

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		=  5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		=  5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	=  3000,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

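/* Illustrative sketch only, not part of the driver: how the table above
 * is consumed by the internal-command path, assuming a hypothetical
 * issue_internal() helper.  On a timeout the per-class index is bumped
 * by ata_internal_cmd_timed_out() below, so the retry automatically
 * picks the next, longer timeout:
 *
 *	unsigned long t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);
 *	err_mask = issue_internal(dev, ATA_CMD_ID_ATA, t);	// 5000ms
 *	if (err_mask & AC_ERR_TIMEOUT) {
 *		ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
 *		t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA); // 10000ms
 *	}
 */
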
static void __ata_port_freeze(struct ata_port *ap);

#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

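/* Example (a sketch, not called anywhere in this file): building up a
 * description string with the helpers above.  Successive calls are
 * joined with ", ":
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ata_ehi_push_desc(ehi, "SError 0x%08x", serror);
 *	// ehi->desc now reads "irq_stat 0x........, SError 0x........"
 */
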
/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI
/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only name and offsetted address is appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}
#endif /* CONFIG_PCI */

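/* For illustration (values are made up): with the helpers above, a
 * typical PCI AHCI port description ends up looking like
 *
 *	"abar m2048@0xf7d36000 port 0xf7d36100 irq 27"
 *
 * i.e. a BAR summary from ata_port_pbar_desc(ap, bar, -1, "abar"), an
 * offsetted address from ata_port_pbar_desc(ap, bar, 0x100, "port"),
 * and free-form items appended via ata_port_desc().
 */
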
static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (!ent->err_mask)
		return NULL;
	return ent;
}

int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_eh_acquire - acquire EH ownership
 *	@ap: ATA port to acquire EH ownership for
 *
 *	Acquire EH ownership for @ap.  This is the basic exclusion
 *	mechanism for ports sharing a host.  Only one port hanging off
 *	the same host can claim the ownership of EH.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}

/**
 *	ata_eh_release - release EH ownership
 *	@ap: ATA port to release EH ownership for
 *
 *	Release EH ownership for @ap.  The caller must have acquired
 *	EH ownership using ata_eh_acquire() previously.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}

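/* Sketch of the ownership protocol (this mirrors what
 * ata_scsi_port_error_handler() below actually does): each EH thread
 * brackets its recovery work so only one port per host runs EH at a
 * time:
 *
 *	ata_eh_acquire(ap);
 *	... reset / revalidate / recover the port ...
 *	ata_eh_release(ap);
 *
 * Ownership may also be released around long sleeps inside EH so that
 * sibling ports sharing the host can make progress in the meantime.
 */
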
/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If we timed out, we raced normal completion and there is
	 * nothing to recover (nr_timedout == 0), so why exactly are we
	 * doing error recovery here?
	 */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
}

/**
 *	ata_scsi_cmd_error_handler - error callback for a list of commands
 *	@host:	scsi host containing the port
 *	@ap:	ATA port within the host
 *	@eh_work_q:	list of commands to process
 *
 *	Process the given list of commands and return those finished to the
 *	ap->eh_done_q.  This function is the first part of the libata error
 *	handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		 * a polled recovery to race the real interrupt handler.
		 *
		 * The lost_interrupt handler checks for any completed but
		 * non-notified command and completes much like an IRQ handler.
		 *
		 * We then fall into the error recovery code which will treat
		 * this as if normal completion won the race.
		 */
		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting ATA_QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);

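/* Race outcomes handled above, summarized by the state of the qc
 * associated with a timed-out scmd (examined under ap->lock):
 *
 *	active && !FAILED  -> genuine timeout: set AC_ERR_TIMEOUT + FAILED
 *	active && FAILED   -> error completion won: leave it for EH proper
 *	no matching qc     -> normal completion won: finish the scmd now
 */
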
/**
 *	ata_scsi_port_error_handler - recover the port after the commands
 *	@host:	SCSI host containing the port
 *	@ap:	the ATA port
 *
 *	Handle the recovery of the port @ap after all the commands
 *	have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);

		/* if unloading, commence suicide */
		if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
		    !(ap->pflags & ATA_PFLAG_UNLOADED))
			ata_eh_unload(ap);

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_err(ap,
				     "EH pending after %d tries, giving up\n",
				     ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* end eh (clear host_eh_scheduled) while holding
		 * ap->lock such that if exception occurs after this
		 * point but before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		ap->ops->end_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

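/* Worked example of the fast-drain path, using the constants above:
 * ata_eh_set_pending(ap, 1) finds, say, 4 qcs in flight and arms the
 * timer for jiffies + 3000 ms.  If ata_eh_fastdrain_timerfn() later
 * still sees the count at 4 (no progress), every in-flight qc gets
 * AC_ERR_TIMEOUT and the port is frozen; if the count has dropped, the
 * timer is re-armed for another ATA_EH_FASTDRAIN_INTERVAL.
 */
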
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 *	ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 *	@ap: ATA port to schedule EH for
 *
 *	LOCKING: inherited from ata_port_schedule_eh
 *	spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);

/**
 *	ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 *	@ap: ATA port to end EH for
 *
 *	In the libata object model there is a 1:1 mapping of ata_port to
 *	shost, so host fields can be directly manipulated under ap->lock, in
 *	the libsas case we need to hold a lock at the ha->level to coordinate
 *	these events.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
	struct Scsi_Host *host = ap->scsi_host;

	host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}

/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->allowed is incremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask)
		scmd->allowed++;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_warn(dev, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_done - EH action complete
 *	@link: ATA link for which EH actions are complete
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 *	ata_err_string - convert err_mask to descriptive string
 *	@err_mask: error mask to convert to string
 *
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}

/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@log: log to read
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);

	ata_tf_init(dev, &tf);
	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
	}
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
			     csum);

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];
	if (ata_id_has_ncq_autosense(dev->id))
		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];

	return 0;
}

/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}

/**
 *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
 *	@qc: qc to perform REQUEST_SENSE_DATA_EXT for
 *	@cmd: scsi command for which the sense code should be set
 *
 *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
				struct scsi_cmnd *cmd)
{
	struct ata_device *dev = qc->dev;
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("ATA request sense\n");
	ata_dev_warn(dev, "request sense\n");
	if (!ata_id_sense_reporting_enabled(dev->id)) {
		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
		return 0;
	}
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	tf.command = ATA_CMD_REQ_SENSE_DATA;
	tf.protocol = ATA_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/*
	 * The device may set the SENSE DATA AVAILABLE bit to one in the
	 * STATUS field and clear the ERROR bit to zero in the STATUS field
	 * to indicate that the command returned completion without an error
	 * and the sense data described in table 306 is available.
	 *
	 * IOW the 'ATA_SENSE' bit might not be set even though valid
	 * sense data is available.
	 * So check for both.
	 */
	if ((tf.command & ATA_SENSE) ||
	    tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
		ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;
		ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
			     tf.lbah, tf.lbam, tf.lbal);
	} else {
		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
			     tf.command, err_mask);
	}
	return err_mask;
}

/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
unsigned int atapi_eh_request_sense(struct ata_device *dev,
				    u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask & ~AC_ERR_DEV)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
			     rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_err(link, "log page 10h reported inactive tag %d\n",
			     tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	if (qc->result_tf.auxiliary) {
		char sense_key, asc, ascq;

		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
		ascq = qc->result_tf.auxiliary & 0xff;
		ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
			    sense_key, asc, ascq);
		ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
		ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;
	}

	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	/*
	 * Sense data reporting does not work if the
	 * device fault bit is set.
	 */
	if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
	    !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = ata_eh_request_sense(qc, qc->scsicmd);
			if (tmp)
				qc->err_mask |= tmp;
			else
				ata_scsi_set_sense_information(qc->scsicmd, tf);
		} else {
			ata_dev_warn(qc->dev, "sense data available but port frozen\n");
		}
	}

	/* Set by NCQ autosense or request sense above */
	if (qc->flags & ATA_QCFLAG_SENSE_VALID)
		return 0;

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & (ATA_UNC | ATA_AMNF))
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						     qc->scsicmd->sense_buffer,
						     qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}

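/* Example decode (made-up register values): stat 0x51 has ATA_DRDY and
 * ATA_ERR set, so the HSM check above passes and the error register is
 * consulted; err 0x40 (ATA_UNC) then yields AC_ERR_DEV | AC_ERR_MEDIA
 * with no reset action, i.e. a plain media error.
 */
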
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}

struct speed_down_verdict_arg {
	u64 since;
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}

/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  IO commands
 *
 *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 *	Verdicts are
 *
 *	NCQ_OFF		: Turn off NCQ.
 *
 *	SPEED_DOWN	: Speed down transfer speed but don't fall back
 *			  to PIO.
 *
 *	FALLBACK_TO_PIO	: Fall back to PIO.
 *
 *	Even if multiple verdicts are returned, only one action is
 *	taken per error.  An action triggered by non-DUBIOUS errors
 *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *	This is to expedite speed down decisions right after device is
 *	initially configured.
 *
 *	The following are speed down rules.  #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO.
 *
 *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *	   during last 10 mins, NCQ_OFF.
 *
 *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}

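/* Worked example (hypothetical error history): a device that logged two
 * TIMEOUT errors and two unknown DEV errors on IO within the last ten
 * minutes trips rule #4 above (TOUT_HSM + UNK_DEV > 3), so the verdict
 * includes NCQ_OFF; with four TOUT_HSM entries it would also trip rule
 * #5 and add SPEED_DOWN.
 */
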
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@eflags: mask of ATA_EFLAG_* flags
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}

/**
 *	ata_eh_worth_retry - analyze error and decide whether to retry
 *	@qc: qc to possibly retry
 *
 *	Look at the cause of the error and decide if a retry
 *	might be useful or not.  We don't want to retry media errors
 *	because the drive itself has probably already taken 10-30 seconds
 *	doing its own internal retries before reporting the failure.
 */
static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
{
	if (qc->err_mask & AC_ERR_MEDIA)
		return 0;	/* don't retry media errors */
	if (qc->flags & ATA_QCFLAG_IO)
		return 1;	/* otherwise retry anything from fs stack */
	if (qc->err_mask & AC_ERR_INVALID)
		return 0;	/* don't retry these */
	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
}

/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (ata_eh_worth_retry(qc))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
		trace_ata_eh_link_autopsy_qc(qc);
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
	}
	trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
}

/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: host port to perform autopsy on
 *
 *	Analyze all links of @ap and determine why they failed and
 *	which recovery actions are needed.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link.  Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action		|= sehc->i.action;
		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
		mehc->i.flags		|= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}
/**
 *	ata_get_cmd_descript - get description for ATA command
 *	@command: ATA command code to get description for
 *
 *	Return a textual description of the given command, or NULL if the
 *	command is not known.
 *
 *	LOCKING:
 *	None
 */
const char *ata_get_cmd_descript(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY,		"STANDBY" },
		{ ATA_CMD_IDLE,			"IDLE" },
		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE,		"SERVICE" },
		{ ATA_CMD_READ,			"READ DMA" },
		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT,	"READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE,		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT,	"WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT,	"WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate list */
	};

	unsigned int i;
	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
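/*
 * Usage sketch (hypothetical caller, not kernel code): the helper
 * returns NULL for unknown opcodes, so check before printing, as
 * ata_eh_link_report() does below.
 */
#if 0	/* illustrative only */
static void example_print_cmd_name(u8 command)
{
	const char *descr = ata_get_cmd_descript(command);

	if (descr)	/* e.g. "FLUSH CACHE" for ATA_CMD_FLUSH */
		pr_info("failed command: %s\n", descr);
}
#endif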
/**
 *	ata_eh_link_report - report error handling to user
 *	@link: ATA link EH is going on
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	char tries_buf[6] = "";
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf), " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			    ehc->i.err_mask, link->sactive, ehc->i.serror,
			    ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_err(ehc->i.dev, "%s\n", desc);
	} else {
		ata_link_err(link, "exception Emask 0x%x "
			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			     ehc->i.err_mask, link->sactive, ehc->i.serror,
			     ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_err(link, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	if (ehc->i.serror)
		ata_link_err(link,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			static const char *prot_str[] = {
				[ATA_PROT_PIO]		= "pio",
				[ATA_PROT_DMA]		= "dma",
				[ATA_PROT_NCQ]		= "ncq",
				[ATAPI_PROT_PIO]	= "pio",
				[ATAPI_PROT_DMA]	= "dma",
			};

			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str[qc->tf.protocol], qc->nbytes,
				 dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			const u8 *cdb = qc->cdb;
			size_t cdb_len = qc->dev->cdb_len;

			if (qc->scsicmd) {
				cdb = qc->scsicmd->cmnd;
				cdb_len = qc->scsicmd->cmd_len;
			}
			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
					      cdb, cdb_len);
		} else {
			const char *descr = ata_get_cmd_descript(cmd->command);
			if (descr)
				ata_dev_err(qc->dev, "failed command: %s\n",
					    descr);
		}

		ata_dev_err(qc->dev,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s%s\n         "
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				    ATA_SENSE | ATA_ERR)) {
			if (res->command & ATA_BUSY)
				ata_dev_err(qc->dev, "status: { Busy }\n");
			else
				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
				  res->command & ATA_DRDY ? "DRDY " : "",
				  res->command & ATA_DF ? "DF " : "",
				  res->command & ATA_DRQ ? "DRQ " : "",
				  res->command & ATA_SENSE ? "SENSE " : "",
				  res->command & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
				     ATA_IDNF | ATA_ABORTED)))
			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
			  res->feature & ATA_ICRC ? "ICRC " : "",
			  res->feature & ATA_UNC ? "UNC " : "",
			  res->feature & ATA_AMNF ? "AMNF " : "",
			  res->feature & ATA_IDNF ? "IDNF " : "",
			  res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}
/**
 *	ata_eh_report - report error handling to user
 *	@ap: ATA port to report EH about
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}

static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
		return 0;
	if (rc == -EAGAIN)
		return 1;
	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
		return 1;
	return 0;
}
int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
	unsigned int *classes = ehc->classes;
	unsigned int lflags = link->flags;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int max_tries = 0, try = 0;
	struct ata_link *failed_link;
	struct ata_device *dev;
	unsigned long deadline, now;
	ata_reset_fn_t reset;
	unsigned long flags;
	u32 sstatus;
	int nr_unknown, rc;

	/*
	 * Prepare to reset
	 */
	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
		max_tries++;
	if (link->flags & ATA_LFLAG_RST_ONCE)
		max_tries = 1;
	if (link->flags & ATA_LFLAG_NO_HRST)
		hardreset = NULL;
	if (link->flags & ATA_LFLAG_NO_SRST)
		softreset = NULL;

	/* make sure each reset attempt is at least COOL_DOWN apart */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		now = jiffies;
		WARN_ON(time_after(ehc->last_reset, now));
		deadline = ata_deadline(ehc->last_reset,
					ATA_EH_RESET_COOL_DOWN);
		if (time_before(now, deadline))
			schedule_timeout_uninterruptible(deadline - now);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);

	ata_for_each_dev(dev, link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* prefer hardreset */
	reset = NULL;
	ehc->i.action &= ~ATA_EH_RESET;
	if (hardreset) {
		reset = hardreset;
		ehc->i.action |= ATA_EH_HARDRESET;
	} else if (softreset) {
		reset = softreset;
		ehc->i.action |= ATA_EH_SOFTRESET;
	}

	if (prereset) {
		unsigned long deadline = ata_deadline(jiffies,
						      ATA_EH_PRERESET_TIMEOUT);

		if (slave) {
			sehc->i.action &= ~ATA_EH_RESET;
			sehc->i.action |= ehc->i.action;
		}

		rc = prereset(link, deadline);

		/* If present, do prereset on slave link too.  Reset
		 * is skipped iff both master and slave links report
		 * -ENOENT or clear ATA_EH_RESET.
		 */
		if (slave && (rc == 0 || rc == -ENOENT)) {
			int tmp;

			tmp = prereset(slave, deadline);
			if (tmp != -ENOENT)
				rc = tmp;

			ehc->i.action |= sehc->i.action;
		}

		if (rc) {
			if (rc == -ENOENT) {
				ata_link_dbg(link, "port disabled--ignoring\n");
				ehc->i.action &= ~ATA_EH_RESET;

				ata_for_each_dev(dev, link, ALL)
					classes[dev->devno] = ATA_DEV_NONE;

				rc = 0;
			} else
				ata_link_err(link,
					     "prereset failed (errno=%d)\n",
					     rc);
			goto out;
		}
	}

	/* prereset() might have cleared ATA_EH_RESET.  If so,
	 * bang classes, thaw and return.
	 */
	if (reset && !(ehc->i.action & ATA_EH_RESET)) {
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_NONE;
		if ((ap->pflags & ATA_PFLAG_FROZEN) &&
		    ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		rc = 0;
		goto out;
	}

 retry:
	/*
	 * Perform reset
	 */
	if (ata_is_host_link(link))
		ata_eh_freeze_port(ap);

	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);

	if (reset) {
		if (verbose)
			ata_link_info(link, "%s resetting link\n",
				      reset == softreset ? "soft" : "hard");

		/* mark that this EH session started with reset */
		ehc->last_reset = jiffies;
		if (reset == hardreset)
			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
		else
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;

		rc = ata_do_reset(link, reset, classes, deadline, true);
		if (rc && rc != -EAGAIN) {
			failed_link = link;
			goto fail;
		}

		/* hardreset slave link if existent */
		if (slave && reset == hardreset) {
			int tmp;

			if (verbose)
				ata_link_info(slave, "hard resetting link\n");

			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
			tmp = ata_do_reset(slave, reset, classes, deadline,
					   false);
			switch (tmp) {
			case -EAGAIN:
				rc = -EAGAIN;
			case 0:
				break;
			default:
				failed_link = slave;
				rc = tmp;
				goto fail;
			}
		}

		/* perform follow-up SRST if necessary */
		if (reset == hardreset &&
		    ata_eh_followup_srst_needed(link, rc)) {
			reset = softreset;

			if (!reset) {
				ata_link_err(link,
	     "follow-up softreset required but no softreset available\n");
				failed_link = link;
				rc = -EINVAL;
				goto fail;
			}

			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
			rc = ata_do_reset(link, reset, classes, deadline, true);
			if (rc) {
				failed_link = link;
				goto fail;
			}
		}
	} else {
		if (verbose)
			ata_link_info(link,
			"no reset method available, skipping reset\n");
		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
			lflags |= ATA_LFLAG_ASSUME_ATA;
	}

	/*
	 * Post-reset processing
	 */
	ata_for_each_dev(dev, link, ALL) {
		/* After the reset, the device state is PIO 0 and the
		 * controller state is undefined.  Reset also wakes up
		 * drives from sleeping mode.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->flags &= ~ATA_DFLAG_SLEEPING;

		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			continue;

		/* apply class override */
		if (lflags & ATA_LFLAG_ASSUME_ATA)
			classes[dev->devno] = ATA_DEV_ATA;
		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
	}

	/* record current link speed */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		link->sata_spd = (sstatus >> 4) & 0xf;
	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
		slave->sata_spd = (sstatus >> 4) & 0xf;

	/* thaw the port */
	if (ata_is_host_link(link))
		ata_eh_thaw_port(ap);

	/* postreset() should clear hardware SError.  Although SError
	 * is cleared during link resume, clearing SError here is
	 * necessary as some PHYs raise hotplug events after SRST.
	 * This introduces race condition where hotplug occurs between
	 * reset and here.  This race is mediated by cross checking
	 * link onlineness and classification result later.
	 */
	if (postreset) {
		postreset(link, classes);
		if (slave)
			postreset(slave, classes);
	}

	/*
	 * Some controllers can't be frozen very well and may set spurious
	 * error conditions during reset.  Clear accumulated error
	 * information and re-thaw the port if frozen.  As reset is the
	 * final recovery action and we cross check link onlineness against
	 * device classification later, no hotplug event is lost by this.
	 */
	spin_lock_irqsave(link->ap->lock, flags);
	memset(&link->eh_info, 0, sizeof(link->eh_info));
	if (slave)
		memset(&slave->eh_info, 0, sizeof(link->eh_info));
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	spin_unlock_irqrestore(link->ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_eh_thaw_port(ap);

	/*
	 * Make sure onlineness and classification result correspond.
	 * Hotplug could have happened during reset and some
	 * controllers fail to wait while a drive is spinning up after
	 * being hotplugged causing misdetection.  By cross checking
	 * link on/offlineness and classification result, those
	 * conditions can be reliably detected and retried.
	 */
	nr_unknown = 0;
	ata_for_each_dev(dev, link, ALL) {
		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
				ata_dev_dbg(dev, "link online but device misclassified\n");
				classes[dev->devno] = ATA_DEV_NONE;
				nr_unknown++;
			}
		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
			if (ata_class_enabled(classes[dev->devno]))
				ata_dev_dbg(dev,
					    "link offline, clearing class %d to NONE\n",
					    classes[dev->devno]);
			classes[dev->devno] = ATA_DEV_NONE;
		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
			ata_dev_dbg(dev,
				    "link status unknown, clearing UNKNOWN to NONE\n");
			classes[dev->devno] = ATA_DEV_NONE;
		}
	}

	if (classify && nr_unknown) {
		if (try < max_tries) {
			ata_link_warn(link,
				      "link online but %d devices misclassified, retrying\n",
				      nr_unknown);
			failed_link = link;
			rc = -EAGAIN;
			goto fail;
		}
		ata_link_warn(link,
			      "link online but %d devices misclassified, "
			      "device detection might fail\n", nr_unknown);
	}

	/* reset successful, schedule revalidation */
	ata_eh_done(link, NULL, ATA_EH_RESET);
	if (slave)
		ata_eh_done(slave, NULL, ATA_EH_RESET);
	ehc->last_reset = jiffies;		/* update to completion time */
	ehc->i.action |= ATA_EH_REVALIDATE;
	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */

	rc = 0;
 out:
	/* clear hotplug flag */
	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
	if (slave)
		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	return rc;

 fail:
	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
	if (!ata_is_host_link(link) &&
	    sata_scr_read(link, SCR_STATUS, &sstatus))
		rc = -ERESTART;

	if (try >= max_tries) {
		/*
		 * Thaw host port even if reset failed, so that the port
		 * can be retried on the next phy event.  This risks
		 * repeated EH runs but seems to be a better tradeoff than
		 * shutting down a port after a botched hotplug attempt.
		 */
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto out;
	}

	now = jiffies;
	if (time_before(now, deadline)) {
		unsigned long delta = deadline - now;

		ata_link_warn(failed_link,
			"reset failed (errno=%d), retrying in %u secs\n",
			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));

		ata_eh_release(ap);
		while (delta)
			delta = schedule_timeout_uninterruptible(delta);
		ata_eh_acquire(ap);
	}

	/*
	 * While disks spinup behind PMP, some controllers fail sending SRST.
	 * They need to be reset - as well as the PMP - before retrying.
	 */
	if (rc == -ERESTART) {
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto out;
	}

	if (try == max_tries - 1) {
		sata_down_spd_limit(link, 0);
		if (slave)
			sata_down_spd_limit(slave, 0);
	} else if (rc == -EPIPE)
		sata_down_spd_limit(failed_link, 0);

	if (hardreset)
		reset = hardreset;
	goto retry;
}
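/*
 * Retry timeline sketch, derived from ata_eh_reset_timeouts[] at the
 * top of this file: try 0 gets a 10s deadline, try 1 another 10s,
 * try 2 35s, try 3 5s, with at least ATA_EH_RESET_COOL_DOWN (5s)
 * between attempts; the ULONG_MAX entry terminates the table, so
 * max_tries comes out as 4 before ata_eh_reset() gives up.
 */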
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * This function can be thought of as an extended version of
	 * ata_eh_about_to_do() specially crafted to accommodate the
	 * requirements of ATA_EH_PARK handling. Since the EH thread
	 * does not leave the do {} while () loop in ata_eh_recover as
	 * long as the timeout for a park request to *one* device on
	 * the port has not expired, and since we still want to pick
	 * up park requests to other devices on the same port or
	 * timeout updates for the same device, we have to pull
	 * ATA_EH_PARK actions from eh_info into eh_context.i
	 * ourselves at the beginning of each pass over the loop.
	 *
	 * Additionally, all write accesses to &ap->park_req_pending
	 * through reinit_completion() (see below) or complete_all()
	 * (see ata_scsi_park_store()) are protected by the host lock.
	 * As a result we have that park_req_pending.done is zero on
	 * exit from this function, i.e. when ATA_EH_PARK actions for
	 * *all* devices on port ap have been pulled into the
	 * respective eh_context structs. If, and only if,
	 * park_req_pending.done is non-zero by the time we reach
	 * wait_for_completion_timeout(), another ATA_EH_PARK action
	 * has been scheduled for at least one of the devices on port
	 * ap and we have to cycle over the do {} while () loop in
	 * ata_eh_recover() again.
	 */

	spin_lock_irqsave(ap->lock, flags);
	reinit_completion(&ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	if (park) {
		ehc->unloaded_mask |= 1 << dev->devno;
		tf.command = ATA_CMD_IDLEIMMEDIATE;
		tf.feature = 0x44;
		tf.lbal = 0x4c;
		tf.lbam = 0x4e;
		tf.lbah = 0x55;
	} else {
		ehc->unloaded_mask &= ~(1 << dev->devno);
		tf.command = ATA_CMD_CHK_POWER;
	}

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (park && (err_mask || tf.lbal != 0xc4)) {
		ata_dev_err(dev, "head unload failed!\n");
		ehc->unloaded_mask &= ~(1 << dev->devno);
	}
}
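/*
 * Background note (an assumption drawn from the ATA8-ACS spec, not from
 * this file): the taskfile built above is IDLE IMMEDIATE with the UNLOAD
 * feature - FEATURE 0x44 with LBA 0x55:0x4e:0x4c - and a device that
 * honored the head unload reports 0xc4 back in LBA low, which is exactly
 * what the check above verifies before trusting the park.
 */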
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			schedule_work(&(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete.  This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error info accumulated during probe */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device.  No need to reset.  Just
				 * thaw and ignore the device.
				 */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode? */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}
/**
 *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
 *	@dev: ATAPI device to clear UA for
 *
 *	Resets and other operations can make an ATAPI device raise
 *	UNIT ATTENTION which causes the next operation to fail.  This
 *	function clears UA.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int atapi_eh_clear_ua(struct ata_device *dev)
{
	int i;

	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
		u8 *sense_buffer = dev->link->ap->sector_buf;
		u8 sense_key = 0;
		unsigned int err_mask;

		err_mask = atapi_eh_tur(dev, &sense_key);
		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
			ata_dev_warn(dev,
				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
				     err_mask);
			return -EIO;
		}

		if (!err_mask || sense_key != UNIT_ATTENTION)
			return 0;

		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
		if (err_mask) {
			ata_dev_warn(dev, "failed to clear "
				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
			return -EIO;
		}
	}

	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
		     ATA_EH_UA_TRIES);

	return 0;
}
/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported to the upper layer
 *	immediately as it means that @dev failed to remap and already
 *	lost at least a sector and further FLUSH retrials won't make
 *	any difference to the lost sector.  However, if FLUSH failed
 *	for other reasons, for example transmission error, FLUSH needs
 *	to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
		     tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
			     err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			if (!(ap->pflags & ATA_PFLAG_FROZEN))
				rc = 0;
		}
	}
	return rc;
}
/**
 *	ata_eh_set_lpm - configure SATA interface power management
 *	@link: link to configure power management
 *	@policy: the link power management policy
 *	@r_failed_dev: out parameter for failed device
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev)
{
	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
	enum ata_lpm_policy old_policy = link->lpm_policy;
	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
	unsigned int err_mask;
	int rc;

	/* if the link or host doesn't do LPM, noop */
	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
		return 0;

	/*
	 * DIPM is enabled only for MIN_POWER as some devices
	 * misbehave when the host NACKs transition to SLUMBER.  Order
	 * device and link configurations such that the host always
	 * allows DIPM requests.
	 */
	ata_for_each_dev(dev, link, ENABLED) {
		bool hipm = ata_id_has_hipm(dev->id);
		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;

		/* find the first enabled and LPM enabled devices */
		if (!link_dev)
			link_dev = dev;

		if (!lpm_dev && (hipm || dipm))
			lpm_dev = dev;

		hints &= ~ATA_LPM_EMPTY;
		if (!hipm)
			hints &= ~ATA_LPM_HIPM;

		/* disable DIPM before changing link config */
		if (policy != ATA_LPM_MIN_POWER && dipm) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_DISABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					     "failed to disable DIPM, Emask 0x%x\n",
					     err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	if (ap) {
		rc = ap->ops->set_lpm(link, policy, hints);
		if (!rc && ap->slave_link)
			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
	} else
		rc = sata_pmp_set_lpm(link, policy, hints);

	/*
	 * Attribute link config failure to the first (LPM) enabled
	 * device on the link.
	 */
	if (rc) {
		if (rc == -EOPNOTSUPP) {
			link->flags |= ATA_LFLAG_NO_LPM;
			return 0;
		}
		dev = lpm_dev ? lpm_dev : link_dev;
		goto fail;
	}

	/*
	 * Low level driver acked the transition.  Issue DIPM command
	 * with the new policy set.
	 */
	link->lpm_policy = policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = policy;

	/* host config updated, enable DIPM if transitioning to MIN_POWER */
	ata_for_each_dev(dev, link, ENABLED) {
		if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
		    ata_id_has_dipm(dev->id)) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					"failed to enable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	link->last_lpm_change = jiffies;
	link->flags |= ATA_LFLAG_CHANGED;

	return 0;

fail:
	/* restore the old policy */
	link->lpm_policy = old_policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = old_policy;

	/* if no device or only one more chance is left, disable LPM */
	if (!dev || ehc->tries[dev->devno] <= 2) {
		ata_link_warn(link, "disabling LPM on the link\n");
		link->flags |= ATA_LFLAG_NO_LPM;
	}
	if (r_failed_dev)
		*r_failed_dev = dev;
	return rc;
}
int ata_link_nr_enabled(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ENABLED)
		cnt++;
	return cnt;
}

static int ata_link_nr_vacant(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ALL)
		if (dev->class == ATA_DEV_UNKNOWN)
			cnt++;
	return cnt;
}

static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* skip disabled links */
	if (link->flags & ATA_LFLAG_DISABLED)
		return 1;

	/* skip if explicitly requested */
	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
		return 1;

	/* thaw frozen port and recover failed devices */
	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
		return 0;

	/* reset at least once if reset is requested */
	if ((ehc->i.action & ATA_EH_RESET) &&
	    !(ehc->i.flags & ATA_EHI_DID_RESET))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	ata_for_each_dev(dev, link, ALL) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}
static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
{
	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
	u64 now = get_jiffies_64();
	int *trials = void_arg;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
	    (ent->timestamp < now - min(now, interval)))
		return -1;

	(*trials)++;
	return 0;
}

static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* the link may be in deep sleep, wake it up */
	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
		if (ata_is_host_link(link))
			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
					       ATA_LPM_EMPTY);
		else
			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
					 ATA_LPM_EMPTY);
	}

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
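/*
 * Worked example, using the constants near the top of this file: with
 * ATA_EH_PROBE_TRIALS == 2 and ATA_EH_PROBE_TRIAL_INTERVAL == 60000ms,
 * a third consecutive failed probe inside one minute makes trials (3)
 * exceed the limit, and sata_down_spd_limit(link, 1) clamps the link
 * to 1.5Gbps on the next reset.
 */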
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
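/*
 * Illustrative walk-through of the countdown above: each device starts
 * an EH session with ATA_EH_DEV_TRIES tries.  -EAGAIN leaves the count
 * untouched, -ENODEV and -EINVAL collapse it to at most one, the last
 * remaining try triggers the speed-down, and once the count reaches
 * zero the device is disabled and, if offline, detached.
 */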
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context.  This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int rc, nr_fails;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_err(link, "reset failed, giving up\n");
			goto out;
		}
	}

	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA &&
				    dev->class != ATA_DEV_ZAC)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		ata_eh_release(ap);
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
		ata_eh_acquire(ap);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	nr_fails = 0;
	ata_for_each_link(link, ap, PMP_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (sata_pmp_attached(ap) && ata_is_host_link(link))
			goto config_lpm;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto rest_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto rest_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto rest_fail;
				if (zpodd_dev_enabled(dev))
					zpodd_post_poweron(dev);
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA &&
			    dev->class != ATA_DEV_ZAC)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto rest_fail;
		}

	config_lpm:
		/* configure link power saving */
		if (link->lpm_policy != ap->target_lpm_policy) {
			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
			if (rc)
				goto rest_fail;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

	rest_fail:
		nr_fails++;
		if (dev)
			ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_fails)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}
/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
EXPORT_SYMBOL_GPL(ata_std_error_handler);
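/*
 * A sketch of how a low-level driver typically wires these helpers up
 * (hypothetical driver names; many drivers simply inherit
 * .error_handler through ata_sff_port_ops or ata_base_port_ops and
 * only override the reset methods they need):
 */
#if 0	/* illustrative only */
static int my_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	/* controller-specific PHY kick would go here, then... */
	return sata_std_hardreset(link, class, deadline);
}

static struct ata_port_operations my_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.hardreset	= my_hardreset,	/* picked up above via ops->hardreset */
	.error_handler	= ata_std_error_handler,
};
#endif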
#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;
	struct ata_device *dev;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event & PM_EVENT_RESUME) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/*
	 * If we have a ZPODD attached, check its zero
	 * power ready status before the port is frozen.
	 * Only needed for runtime suspend.
	 */
	if (PMSG_IS_AUTO(ap->pm_mesg)) {
		ata_for_each_dev(dev, &ap->link, ENABLED) {
			if (zpodd_dev_enabled(dev))
				zpodd_on_suspend(dev);
		}
	}

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, ap->pm_mesg);
 out:
	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies, which doesn't advance while
	 * suspended, and PHY events during resume aren't uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, ap->pm_mesg);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */