]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/ata/libata-eh.c
UBUNTU: Ubuntu-5.4.0-117.132
[mirror_ubuntu-focal-kernel.git] / drivers / ata / libata-eh.c
CommitLineData
c82ee6d3 1// SPDX-License-Identifier: GPL-2.0-or-later
ece1d636
TH
2/*
3 * libata-eh.c - libata error handling
4 *
8c3d3d4b 5 * Maintained by: Tejun Heo <tj@kernel.org>
ece1d636
TH
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2006 Tejun Heo <htejun@gmail.com>
10 *
ece1d636 11 * libata documentation is available via 'make {ps|pdf}docs',
9bb9a39c 12 * as Documentation/driver-api/libata.rst
ece1d636
TH
13 *
14 * Hardware documentation available from http://www.t13.org/ and
15 * http://www.sata-io.org/
ece1d636
TH
16 */
17
ece1d636 18#include <linux/kernel.h>
242f9dcb 19#include <linux/blkdev.h>
38789fda 20#include <linux/export.h>
2855568b 21#include <linux/pci.h>
ece1d636
TH
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_eh.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_cmnd.h>
6521148c 27#include <scsi/scsi_dbg.h>
c6fd2807 28#include "../scsi/scsi_transport_api.h"
ece1d636
TH
29
30#include <linux/libata.h>
31
255c03d1 32#include <trace/events/libata.h>
ece1d636
TH
33#include "libata.h"
34
/*
 * EH-private constants.  All *_TIMEOUT / *_INTERVAL / *_COOL_DOWN values
 * are in milliseconds (cf. the reset timeout table below, which documents
 * 10000 as "10sec").
 */
enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	/* default timeout for EH internal commands not in the class table */
	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};
77
/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,  /* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_revalidate_timeouts[] = {
	15000,	/* Some drives are slow to read log pages when waking-up */
	15000,  /* combined time till here is enough even for media access */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,  /* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

/* One entry per command class: the commands it covers (0-terminated
 * list) and the escalating timeout sequence to apply on retries.
 */
struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
	  .timeouts = ata_eh_revalidate_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
156
static void __ata_port_freeze(struct ata_port *ap);

/* Port suspend/resume handlers compile to no-ops when CONFIG_PM is off. */
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
ad9e2762 168
0d74d872
MM
169static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
170 const char *fmt, va_list args)
b64bbc39
TH
171{
172 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
173 ATA_EH_DESC_LEN - ehi->desc_len,
174 fmt, args);
175}
176
/**
 * __ata_ehi_push_desc - push error description without adding separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 * Output that does not fit in the remaining buffer is silently
 * truncated.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
195
/**
 * ata_ehi_push_desc - push error description with separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 * If @ehi->desc is not empty, ", " is added in-between.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	/* separate from any previously pushed description */
	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
218
219/**
220 * ata_ehi_clear_desc - clean error description
221 * @ehi: target EHI
222 *
223 * Clear @ehi->desc.
224 *
225 * LOCKING:
226 * spin_lock_irqsave(host lock)
227 */
228void ata_ehi_clear_desc(struct ata_eh_info *ehi)
229{
230 ehi->desc[0] = '\0';
231 ehi->desc_len = 0;
232}
233
cbcdd875
TH
/**
 * ata_port_desc - append port description
 * @ap: target ATA port
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to port
 * description.  If port description is not empty, " " is added
 * in-between.  This function is to be used while initializing
 * ata_host.  The description is printed on host registration.
 *
 * LOCKING:
 * None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	/* only legal while the port is still being initialized */
	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
260
#ifdef CONFIG_PCI

/**
 * ata_port_pbar_desc - append PCI BAR description
 * @ap: target ATA port
 * @bar: target PCI BAR
 * @offset: offset into PCI BAR
 * @name: name of the area
 *
 * If @offset is negative, this function formats a string which
 * contains the name, address, size and type of the BAR and
 * appends it to the port description.  If @offset is zero or
 * positive, only name and offsetted address is appended.
 *
 * LOCKING:
 * None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	/* "m" for memory-mapped, "i" for port I/O BARs */
	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */
301
87fbc5a0
TH
302static int ata_lookup_timeout_table(u8 cmd)
303{
304 int i;
305
306 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
307 const u8 *cur;
308
309 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
310 if (*cur == cmd)
311 return i;
312 }
313
314 return -1;
315}
316
/**
 * ata_internal_cmd_timeout - determine timeout for an internal command
 * @dev: target device
 * @cmd: internal command to be issued
 *
 * Determine timeout for internal command @cmd for @dev.  The
 * per-class retry index recorded in ehc->cmd_timeout_idx selects
 * which entry of the class's timeout table is used.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	/* command not in the class table, use the default timeout */
	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}
342
/**
 * ata_internal_cmd_timed_out - notification for internal command timeout
 * @dev: target device
 * @cmd: internal command which timed out
 *
 * Notify EH that internal command @cmd for @dev timed out.  This
 * function should be called only for commands whose timeouts are
 * determined using ata_internal_cmd_timeout().
 *
 * LOCKING:
 * EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	/* advance to the next timeout unless we're already at the
	 * ULONG_MAX sentinel terminating the class's timeout table
	 */
	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
368
3884f7b0 369static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
0c247c55
TH
370 unsigned int err_mask)
371{
372 struct ata_ering_entry *ent;
373
374 WARN_ON(!err_mask);
375
376 ering->cursor++;
377 ering->cursor %= ATA_ERING_SIZE;
378
379 ent = &ering->ring[ering->cursor];
3884f7b0 380 ent->eflags = eflags;
0c247c55
TH
381 ent->err_mask = err_mask;
382 ent->timestamp = get_jiffies_64();
383}
384
76326ac1
TH
385static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
386{
387 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
388
389 if (ent->err_mask)
390 return ent;
391 return NULL;
392}
393
d9027470
GG
/* Walk the error ring from the newest entry backwards, invoking
 * @map_fn on each recorded entry.  Iteration stops early at the first
 * empty slot or when @map_fn returns non-zero; that return value is
 * propagated to the caller (0 on a full clean walk).
 */
int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		/* empty slot means we've walked past all recorded entries */
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		/* step backwards with wrap-around */
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}
414
/* map_fn for ata_ering_clear(): mark one entry stale.  Entries are
 * flagged ATA_EFLAG_OLD_ER rather than erased so history is preserved.
 */
static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

/* Mark every recorded entry in the ring as old. */
static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}
425
64f65ca6
TH
426static unsigned int ata_eh_dev_action(struct ata_device *dev)
427{
9af5c9c9 428 struct ata_eh_context *ehc = &dev->link->eh_context;
64f65ca6
TH
429
430 return ehc->i.action | ehc->i.dev_action[dev->devno];
431}
432
/* Clear @action bits from @ehi.  With @dev == NULL the bits are
 * cleared link-wide and for every device; with a specific @dev,
 * link-wide bits are first pushed down into each device's
 * dev_action so that clearing them for @dev alone is possible.
 */
static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
458
c0c362b6
TH
/**
 * ata_eh_acquire - acquire EH ownership
 * @ap: ATA port to acquire EH ownership for
 *
 * Acquire EH ownership for @ap.  This is the basic exclusion
 * mechanism for ports sharing a host.  Only one port hanging off
 * the same host can claim the ownership of EH.
 *
 * LOCKING:
 * EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	/* ownership must be free once we hold the mutex */
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}
476
/**
 * ata_eh_release - release EH ownership
 * @ap: ATA port to release EH ownership for
 *
 * Release EH ownership for @ap.  The caller must have acquired
 * EH ownership using ata_eh_acquire() previously.
 *
 * LOCKING:
 * EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	/* only the current owner may release */
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}
493
ece180d1
TH
/* Final EH pass on driver unload: restore SControl, disable all
 * devices and permanently freeze the port.
 */
static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}
518
ece1d636
TH
/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.  Splices the host's EH
 * command queue onto a local list, classifies/finishes the commands
 * and then runs port-level recovery.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 *
 * RETURNS:
 * Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	DPRINTK("ENTER\n");

	/* take the failed scmds off the host's queue under host lock */
	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If we timed out while racing normal completion and there is
	 * nothing to recover (nr_timedout == 0), port EH may run with
	 * nothing to do; it sorts that out itself.
	 */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(!list_empty(&eh_work_q));

	DPRINTK("EXIT\n");
}
554
/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host: scsi host containing the port
 * @ap: ATA port within the host
 * @eh_work_q: list of commands to process
 *
 * process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		/* match each failed scmd to its qc, if any */
		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
			struct ata_queued_cmd *qc;

			ata_qc_for_each_raw(ap, qc, i) {
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs.  They belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	}
	spin_unlock_irqrestore(ap->lock, flags);

}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
649
/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host: SCSI host containing the port
 * @ap: the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.  Repeats ->error_handler() up to
 * ATA_EH_MAX_TRIES times while new EH events keep arriving.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		/* snapshot eh_info into eh_context for this EH run and
		 * record current xfer mode / NCQ state per device
		 */
		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_err(ap,
				     "EH pending after %d tries, giving up\n",
				     ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* end eh (clear host_eh_scheduled) while holding
		 * ap->lock such that if exception occurs after this
		 * point but before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		ap->ops->end_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		/* legacy EH path */
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
		 !(ap->flags & ATA_FLAG_SAS_HOST))
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
ece1d636 773
c6cf9e99
TH
/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* sleep until neither pending nor in-progress EH remains */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);
c6cf9e99 808
5ddf24c5
TH
809static int ata_eh_nr_in_flight(struct ata_port *ap)
810{
258c4e5c 811 struct ata_queued_cmd *qc;
5ddf24c5
TH
812 unsigned int tag;
813 int nr = 0;
814
815 /* count only non-internal commands */
258c4e5c
JA
816 ata_qc_for_each(ap, qc, tag) {
817 if (qc)
5ddf24c5 818 nr++;
9d207acc 819 }
5ddf24c5
TH
820
821 return nr;
822}
823
/* Fast-drain timer callback: if no qc completed during the interval,
 * time out everything still in flight and freeze the port; otherwise
 * re-arm for another interval.
 */
void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
	struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		struct ata_queued_cmd *qc;
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		ata_qc_for_each(ap, qc, tag) {
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
862
/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before.  Fast drain ensures
 * that EH kicks in in timely manner.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}
899
f686bcb8
TH
/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc.  EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(qc->scsicmd->request);
}
926
/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 * LOCKING: inherited from ata_port_schedule_eh
 * spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* during initialization EH is handled by the probing path */
	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);
947
948/**
949 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
950 * @ap: ATA port to end EH for
951 *
952 * In the libata object model there is a 1:1 mapping of ata_port to
953 * shost, so host fields can be directly manipulated under ap->lock, in
954 * the libsas case we need to hold a lock at the ha->level to coordinate
955 * these events.
956 *
957 * LOCKING:
958 * spin_lock_irqsave(host lock)
959 */
960void ata_std_end_eh(struct ata_port *ap)
961{
962 struct Scsi_Host *host = ap->scsi_host;
963
964 host->host_eh_scheduled = 0;
965}
966EXPORT_SYMBOL(ata_std_end_eh);
967
968
/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap.  EH will kick in as soon as
 * all commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}
7b70fc03 984
dbd82616 985static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
7b70fc03 986{
258c4e5c 987 struct ata_queued_cmd *qc;
7b70fc03
TH
988 int tag, nr_aborted = 0;
989
990 WARN_ON(!ap->ops->error_handler);
991
5ddf24c5
TH
992 /* we're gonna abort all commands, no need for fast drain */
993 ata_eh_set_pending(ap, 0);
994
28361c40 995 /* include internal tag in iteration */
258c4e5c 996 ata_qc_for_each_with_internal(ap, qc, tag) {
dbd82616 997 if (qc && (!link || qc->dev->link == link)) {
7b70fc03
TH
998 qc->flags |= ATA_QCFLAG_FAILED;
999 ata_qc_complete(qc);
1000 nr_aborted++;
1001 }
1002 }
1003
1004 if (!nr_aborted)
1005 ata_port_schedule_eh(ap);
1006
1007 return nr_aborted;
1008}
1009
dbd82616
TH
/**
 * ata_link_abort - abort all qc's on the link
 * @link: ATA link to abort qc's for
 *
 * Abort all active qc's active on @link and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all active qc's of @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	/* NULL link means "every link on the port" */
	return ata_do_link_abort(ap, NULL);
}
1043
e3180499
TH
1044/**
1045 * __ata_port_freeze - freeze port
1046 * @ap: ATA port to freeze
1047 *
1048 * This function is called when HSM violation or some other
1049 * condition disrupts normal operation of the port. Frozen port
1050 * is not allowed to perform any operation until the port is
1051 * thawed, which usually follows a successful reset.
1052 *
1053 * ap->ops->freeze() callback can be used for freezing the port
1054 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
1055 * port cannot be frozen hardware-wise, the interrupt handler
1056 * must ack and clear interrupts unconditionally while the port
1057 * is frozen.
1058 *
1059 * LOCKING:
cca3974e 1060 * spin_lock_irqsave(host lock)
e3180499
TH
1061 */
1062static void __ata_port_freeze(struct ata_port *ap)
1063{
1064 WARN_ON(!ap->ops->error_handler);
1065
1066 if (ap->ops->freeze)
1067 ap->ops->freeze(ap);
1068
b51e9e5d 1069 ap->pflags |= ATA_PFLAG_FROZEN;
e3180499 1070
44877b4e 1071 DPRINTK("ata%u port frozen\n", ap->print_id);
e3180499
TH
1072}
1073
1074/**
1075 * ata_port_freeze - abort & freeze port
1076 * @ap: ATA port to freeze
1077 *
54c38444
JG
1078 * Abort and freeze @ap. The freeze operation must be called
1079 * first, because some hardware requires special operations
1080 * before the taskfile registers are accessible.
e3180499
TH
1081 *
1082 * LOCKING:
cca3974e 1083 * spin_lock_irqsave(host lock)
e3180499
TH
1084 *
1085 * RETURNS:
1086 * Number of aborted commands.
1087 */
1088int ata_port_freeze(struct ata_port *ap)
1089{
1090 int nr_aborted;
1091
1092 WARN_ON(!ap->ops->error_handler);
1093
e3180499 1094 __ata_port_freeze(ap);
54c38444 1095 nr_aborted = ata_port_abort(ap);
e3180499
TH
1096
1097 return nr_aborted;
1098}
1099
7d77b247
TH
/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	/* read SNotification; writing the value straight back clears
	 * the bits that were just reported
	 */
	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN; each PMP fan-out port has
		 * its own bit in SNotification
		 */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
1172
e3180499
TH
1173/**
1174 * ata_eh_freeze_port - EH helper to freeze port
1175 * @ap: ATA port to freeze
1176 *
1177 * Freeze @ap.
1178 *
1179 * LOCKING:
1180 * None.
1181 */
1182void ata_eh_freeze_port(struct ata_port *ap)
1183{
1184 unsigned long flags;
1185
1186 if (!ap->ops->error_handler)
1187 return;
1188
ba6a1308 1189 spin_lock_irqsave(ap->lock, flags);
e3180499 1190 __ata_port_freeze(ap);
ba6a1308 1191 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1192}
1193
1194/**
1195 * ata_port_thaw_port - EH helper to thaw port
1196 * @ap: ATA port to thaw
1197 *
1198 * Thaw frozen port @ap.
1199 *
1200 * LOCKING:
1201 * None.
1202 */
1203void ata_eh_thaw_port(struct ata_port *ap)
1204{
1205 unsigned long flags;
1206
1207 if (!ap->ops->error_handler)
1208 return;
1209
ba6a1308 1210 spin_lock_irqsave(ap->lock, flags);
e3180499 1211
b51e9e5d 1212 ap->pflags &= ~ATA_PFLAG_FROZEN;
e3180499
TH
1213
1214 if (ap->ops->thaw)
1215 ap->ops->thaw(ap);
1216
ba6a1308 1217 spin_unlock_irqrestore(ap->lock, flags);
e3180499 1218
44877b4e 1219 DPRINTK("ata%u port thawed\n", ap->print_id);
e3180499
TH
1220}
1221
ece1d636
TH
/* qc->scsidone stub installed while EH owns a command; the actual
 * completion is delivered through scsi_eh_finish_cmd() instead.
 */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* intentionally empty */
}
1226
1227static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1228{
1229 struct ata_port *ap = qc->ap;
1230 struct scsi_cmnd *scmd = qc->scsicmd;
1231 unsigned long flags;
1232
ba6a1308 1233 spin_lock_irqsave(ap->lock, flags);
ece1d636
TH
1234 qc->scsidone = ata_eh_scsidone;
1235 __ata_qc_complete(qc);
1236 WARN_ON(ata_tag_valid(qc->tag));
ba6a1308 1237 spin_unlock_irqrestore(ap->lock, flags);
ece1d636
TH
1238
1239 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1240}
1241
1242/**
1243 * ata_eh_qc_complete - Complete an active ATA command from EH
1244 * @qc: Command to complete
1245 *
1246 * Indicate to the mid and upper layers that an ATA command has
1247 * completed. To be used from EH.
1248 */
1249void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1250{
1251 struct scsi_cmnd *scmd = qc->scsicmd;
1252 scmd->retries = scmd->allowed;
1253 __ata_eh_qc_complete(qc);
1254}
1255
1256/**
1257 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1258 * @qc: Command to retry
1259 *
1260 * Indicate to the mid and upper layers that an ATA command
1261 * should be retried. To be used from EH.
1262 *
1263 * SCSI midlayer limits the number of retries to scmd->allowed.
f13e2201 1264 * scmd->allowed is incremented for commands which get retried
ece1d636
TH
1265 * due to unrelated failures (qc->err_mask is zero).
1266 */
1267void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1268{
1269 struct scsi_cmnd *scmd = qc->scsicmd;
f13e2201
GG
1270 if (!qc->err_mask)
1271 scmd->allowed++;
ece1d636
TH
1272 __ata_eh_qc_complete(qc);
1273}
022bdb07 1274
678afac6
TH
/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	Locking:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_warn(dev, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	/* NOTE(review): the increment appears to rely on each *_UNSUP class
	 * value being the enabled value + 1 — confirm against ATA_DEV_*
	 * ordering in <linux/libata.h>
	 */
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}
1300
0ea035a3
TH
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	/* the detach request is being acted upon; clear it */
	dev->flags &= ~ATA_DFLAG_DETACH;

	/* if the scsi device went offline, ask the hotplug worker to
	 * remove it
	 */
	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}
1336
022bdb07
TH
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	/* clear the pending bits so a repeat request can be detected */
	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
1370
47005f25
TH
1371/**
1372 * ata_eh_done - EH action complete
2f60e1ab 1373 * @link: ATA link for which EH actions are complete
47005f25
TH
1374 * @dev: target ATA dev for per-dev action (can be NULL)
1375 * @action: action just completed
1376 *
1377 * Called right after performing EH actions to clear related bits
955e57df 1378 * in @link->eh_context.
47005f25
TH
1379 *
1380 * LOCKING:
1381 * None.
1382 */
fb7fd614
TH
1383void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1384 unsigned int action)
47005f25 1385{
955e57df 1386 struct ata_eh_context *ehc = &link->eh_context;
9af5c9c9 1387
955e57df 1388 ata_eh_clear_action(link, dev, &ehc->i, action);
47005f25
TH
1389}
1390
022bdb07
TH
1391/**
1392 * ata_err_string - convert err_mask to descriptive string
1393 * @err_mask: error mask to convert to string
1394 *
1395 * Convert @err_mask to descriptive string. Errors are
1396 * prioritized according to severity and only the most severe
1397 * error is reported.
1398 *
1399 * LOCKING:
1400 * None.
1401 *
1402 * RETURNS:
1403 * Descriptive string for @err_mask
1404 */
2dcb407e 1405static const char *ata_err_string(unsigned int err_mask)
022bdb07
TH
1406{
1407 if (err_mask & AC_ERR_HOST_BUS)
1408 return "host bus error";
1409 if (err_mask & AC_ERR_ATA_BUS)
1410 return "ATA bus error";
1411 if (err_mask & AC_ERR_TIMEOUT)
1412 return "timeout";
1413 if (err_mask & AC_ERR_HSM)
1414 return "HSM violation";
1415 if (err_mask & AC_ERR_SYSTEM)
1416 return "internal error";
1417 if (err_mask & AC_ERR_MEDIA)
1418 return "media error";
1419 if (err_mask & AC_ERR_INVALID)
1420 return "invalid argument";
1421 if (err_mask & AC_ERR_DEV)
1422 return "device error";
54fb131b
DLM
1423 if (err_mask & AC_ERR_NCQ)
1424 return "NCQ error";
1425 if (err_mask & AC_ERR_NODEV_HINT)
1426 return "Polling detection error";
022bdb07
TH
1427 return "unknown error";
1428}
1429
e8ee8451
TH
/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
	if (err_mask)
		return -EIO;

	/* the whole sector should sum to zero; warn but keep going */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
			     csum);

	/* NQ bit set: the error was not for a queued command */
	if (buf[0] & 0x80)
		return -ENOENT;

	/* low five bits carry the failed command's NCQ tag */
	*tag = buf[0] & 0x1f;

	/* copy the error taskfile image out of the log page */
	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];
	/* ZAC devices with NCQ autosense report sense data in bytes 14-16 */
	if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];

	return 0;
}
1485
11fc33da
TH
1486/**
1487 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1488 * @dev: target ATAPI device
1489 * @r_sense_key: out parameter for sense_key
1490 *
1491 * Perform ATAPI TEST_UNIT_READY.
1492 *
1493 * LOCKING:
1494 * EH context (may sleep).
1495 *
1496 * RETURNS:
1497 * 0 on success, AC_ERR_* mask on failure.
1498 */
3dc67440 1499unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
11fc33da
TH
1500{
1501 u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1502 struct ata_taskfile tf;
1503 unsigned int err_mask;
1504
1505 ata_tf_init(dev, &tf);
1506
1507 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1508 tf.command = ATA_CMD_PACKET;
1509 tf.protocol = ATAPI_PROT_NODATA;
1510
1511 err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1512 if (err_mask == AC_ERR_DEV)
1513 *r_sense_key = tf.feature >> 4;
1514 return err_mask;
1515}
1516
e87fd28c
HR
/**
 *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
 *	@qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
 *	@cmd: scsi command for which the sense code should be set
 *
 *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_request_sense(struct ata_queued_cmd *qc,
				 struct scsi_cmnd *cmd)
{
	struct ata_device *dev = qc->dev;
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* a frozen port can't issue commands — give up */
	if (qc->ap->pflags & ATA_PFLAG_FROZEN) {
		ata_dev_warn(dev, "sense data available but port frozen\n");
		return;
	}

	/* nothing to fill in, or sense already captured */
	if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
		return;

	if (!ata_id_sense_reporting_enabled(dev->id)) {
		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
		return;
	}

	DPRINTK("ATA request sense\n");

	ata_tf_init(dev, &tf);
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	tf.command = ATA_CMD_REQ_SENSE_DATA;
	tf.protocol = ATA_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* Ignore err_mask; ATA_ERR might be set */
	if (tf.command & ATA_SENSE) {
		/* sense key / asc / ascq are returned in lbah/lbam/lbal */
		ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;
	} else {
		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
			     tf.command, err_mask);
	}
}
1566
022bdb07
TH
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
unsigned int atapi_eh_request_sense(struct ata_device *dev,
				    u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;	/* fixed-format, current error */
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		/* byte-count limit goes in lbam/lbah for PIO transfers */
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
1619
/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	/* map SError bits onto error classes; each one demands a reset */
	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}
1668
e8ee8451
TH
/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		/* a failed qc with err_mask set means the LLDD already
		 * determined the cause — nothing for us to do
		 */
		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
			     rc);
		return;
	}

	/* sanity check: the reported tag must still be outstanding */
	if (!(link->sactive & (1 << tag))) {
		ata_link_err(link, "log page 10h reported inactive tag %d\n",
			     tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	/* ZAC NCQ autosense: sense data was delivered in the auxiliary
	 * field of the log page taskfile image
	 */
	if (dev->class == ATA_DEV_ZAC &&
	    ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
		char sense_key, asc, ascq;

		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
		ascq = qc->result_tf.auxiliary & 0xff;
		ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
		ata_scsi_set_sense_information(dev, qc->scsicmd,
					       &qc->result_tf);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;
	}

	/* the device error has been attributed to a specific qc */
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
1742
022bdb07
TH
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	/* anything other than a clean DRDY status is an HSM violation */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF)) {
		qc->err_mask |= AC_ERR_DEV;
		/*
		 * Sense data reporting does not work if the
		 * device fault bit is set.
		 */
		if (stat & ATA_DF)
			stat &= ~ATA_SENSE;
	} else {
		return 0;
	}

	switch (qc->dev->class) {
	case ATA_DEV_ZAC:
		if (stat & ATA_SENSE)
			ata_eh_request_sense(qc, qc->scsicmd);
		/* fall through */
	case ATA_DEV_ATA:
		/* decode the ERROR register bits */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & (ATA_UNC | ATA_AMNF))
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		/* fetch sense data unless the port is frozen */
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp)
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			else
				qc->err_mask |= tmp;
		}
	}

	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
		int ret = scsi_check_sense(qc->scsicmd);
		/*
		 * SUCCESS here means that the sense code could be
		 * evaluated and should be passed to the upper layers
		 * for correct evaluation.
		 * FAILED means the sense code could not be interpreted
		 * and the device would need to be reset.
		 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
		 * command would need to be retried.
		 */
		if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
			qc->flags |= ATA_QCFLAG_RETRY;
			qc->err_mask |= AC_ERR_OTHER;
		} else if (ret != SUCCESS) {
			qc->err_mask |= AC_ERR_HSM;
		}
	}
	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
1830
76326ac1
TH
1831static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1832 int *xfer_ok)
022bdb07 1833{
76326ac1
TH
1834 int base = 0;
1835
1836 if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1837 *xfer_ok = 1;
1838
1839 if (!*xfer_ok)
75f9cafc 1840 base = ATA_ECAT_DUBIOUS_NONE;
76326ac1 1841
7d47e8d4 1842 if (err_mask & AC_ERR_ATA_BUS)
76326ac1 1843 return base + ATA_ECAT_ATA_BUS;
022bdb07 1844
7d47e8d4 1845 if (err_mask & AC_ERR_TIMEOUT)
76326ac1 1846 return base + ATA_ECAT_TOUT_HSM;
7d47e8d4 1847
3884f7b0 1848 if (eflags & ATA_EFLAG_IS_IO) {
7d47e8d4 1849 if (err_mask & AC_ERR_HSM)
76326ac1 1850 return base + ATA_ECAT_TOUT_HSM;
7d47e8d4
TH
1851 if ((err_mask &
1852 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
76326ac1 1853 return base + ATA_ECAT_UNK_DEV;
022bdb07
TH
1854 }
1855
1856 return 0;
1857}
1858
/* accumulator passed through ata_ering_map() by ata_eh_speed_down_verdict() */
struct speed_down_verdict_arg {
	u64 since;			/* ignore entries older than this */
	int xfer_ok;			/* latched once a non-dubious xfer seen */
	int nr_errors[ATA_ECAT_NR];	/* per-category error counts */
};
1864
7d47e8d4 1865static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
022bdb07 1866{
7d47e8d4 1867 struct speed_down_verdict_arg *arg = void_arg;
76326ac1 1868 int cat;
022bdb07 1869
d9027470 1870 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
022bdb07
TH
1871 return -1;
1872
76326ac1
TH
1873 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1874 &arg->xfer_ok);
7d47e8d4 1875 arg->nr_errors[cat]++;
76326ac1 1876
022bdb07
TH
1877 return 0;
1878}
1879
/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  IO commands
 *
 *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 *	Verdicts are
 *
 *	NCQ_OFF		: Turn off NCQ.
 *
 *	SPEED_DOWN	: Speed down transfer speed but don't fall back
 *			  to PIO.
 *
 *	FALLBACK_TO_PIO	: Fall back to PIO.
 *
 *	Even if multiple verdicts are returned, only one action is
 *	taken per error.  An action triggered by non-DUBIOUS errors
 *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *	This is to expedite speed down decisions right after device is
 *	initially configured.
 *
 *	The following are speed down rules.  #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO
 *
 *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *	   during last 10 mins, NCQ_OFF.
 *
 *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	/* rule #1: dubious bus/HSM trouble — drop speed and prepare PIO */
	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	/* rule #2: dubious HSM/device trouble — disable NCQ */
	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	/* rule #3: heavy recent error load — last resort, fall to PIO */
	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	/* rule #4 */
	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	/* rule #5 */
	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
1979
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@eflags: mask of ATA_EFLAG_* flags
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	The escalation order implemented below is: disable NCQ, lower
 *	the SATA link speed, lower the transfer mode (twice, tracked by
 *	@dev->spdn_cnt), and finally force PIO for PATA/ATAPI devices.
 *	The first step that succeeds wins and ends the function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ?  Only if NCQ is currently enabled: ATA_DFLAG_NCQ
	 * set while both ATA_DFLAG_PIO and ATA_DFLAG_NCQ_OFF are clear.
	 */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode; at most two steps, selected by
		 * dev->spdn_cnt from the per-mode table below
		 */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and ATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history unless the
	 * verdict asks us to keep it (ATA_EH_SPDN_KEEP_ERRORS)
	 */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
2072
8d899e70
ML
2073/**
2074 * ata_eh_worth_retry - analyze error and decide whether to retry
2075 * @qc: qc to possibly retry
2076 *
2077 * Look at the cause of the error and decide if a retry
2078 * might be useful or not. We don't want to retry media errors
2079 * because the drive itself has probably already taken 10-30 seconds
2080 * doing its own internal retries before reporting the failure.
2081 */
2082static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
2083{
1eaca39a 2084 if (qc->err_mask & AC_ERR_MEDIA)
8d899e70
ML
2085 return 0; /* don't retry media errors */
2086 if (qc->flags & ATA_QCFLAG_IO)
2087 return 1; /* otherwise retry anything from fs stack */
2088 if (qc->err_mask & AC_ERR_INVALID)
2089 return 0; /* don't retry these */
2090 return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */
2091}
2092
7eb49509
DLM
2093/**
2094 * ata_eh_quiet - check if we need to be quiet about a command error
2095 * @qc: qc to check
2096 *
2097 * Look at the qc flags anbd its scsi command request flags to determine
2098 * if we need to be quiet about the command failure.
2099 */
2100static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
2101{
2102 if (qc->scsicmd &&
2103 qc->scsicmd->request->rq_flags & RQF_QUIET)
2104 qc->flags |= ATA_QCFLAG_QUIET;
2105 return qc->flags & ATA_QCFLAG_QUIET;
2106}
2107
/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_queued_cmd *qc;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag, nr_failed = 0, nr_quiet = 0;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	/* walk all commands and analyze the ones failed on this link */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/*
		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
		 * layers will determine whether the command is worth retrying
		 * based on the sense data and device class/type. Otherwise,
		 * determine directly if the command is worth retrying using its
		 * error mask and flags.
		 */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
		else if (ata_eh_worth_retry(qc))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info; ehc->i.dev ends up pointing at the
		 * device of the last failed qc examined
		 */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
		trace_ata_eh_link_autopsy_qc(qc);

		/* Count quiet errors */
		if (ata_eh_quiet(qc))
			nr_quiet++;
		nr_failed++;
	}

	/* If all failed commands requested silence, then be quiet */
	if (nr_quiet == nr_failed)
		ehc->i.flags |= ATA_EHI_QUIET;

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down; if no offending device
	 * was identified but the link has exactly one enabled device,
	 * attribute the errors to it
	 */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
	}
	DPRINTK("EXIT\n");
}
2240
/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: host port to perform autopsy on
 *
 *	Analyze all links of @ap and determine why they failed and
 *	which recovery actions are needed.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	/* fan-out (EDGE) links first; the host link may be done last below */
	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link.  Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action |= sehc->i.action;
		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
		mehc->i.flags |= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}
2286
6521148c
RH
2287/**
2288 * ata_get_cmd_descript - get description for ATA command
2289 * @command: ATA command code to get description for
2290 *
2291 * Return a textual description of the given command, or NULL if the
2292 * command is not known.
2293 *
2294 * LOCKING:
2295 * None
2296 */
2297const char *ata_get_cmd_descript(u8 command)
2298{
2299#ifdef CONFIG_ATA_VERBOSE_ERROR
2300 static const struct
2301 {
2302 u8 command;
2303 const char *text;
2304 } cmd_descr[] = {
2305 { ATA_CMD_DEV_RESET, "DEVICE RESET" },
825e2d87
HR
2306 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2307 { ATA_CMD_STANDBY, "STANDBY" },
2308 { ATA_CMD_IDLE, "IDLE" },
2309 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2310 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
3915c3b5 2311 { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" },
6521148c 2312 { ATA_CMD_NOP, "NOP" },
825e2d87
HR
2313 { ATA_CMD_FLUSH, "FLUSH CACHE" },
2314 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2315 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2316 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2317 { ATA_CMD_SERVICE, "SERVICE" },
2318 { ATA_CMD_READ, "READ DMA" },
2319 { ATA_CMD_READ_EXT, "READ DMA EXT" },
2320 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2321 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
6521148c 2322 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
825e2d87
HR
2323 { ATA_CMD_WRITE, "WRITE DMA" },
2324 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2325 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2326 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
6521148c
RH
2327 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2328 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2329 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2330 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2331 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
3915c3b5
RH
2332 { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
2333 { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
6521148c
RH
2334 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2335 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2336 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2337 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2338 { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2339 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2340 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2341 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
825e2d87 2342 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
6521148c
RH
2343 { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2344 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2345 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2346 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2347 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2348 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2349 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2350 { ATA_CMD_SLEEP, "SLEEP" },
2351 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2352 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2353 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2354 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2355 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2356 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2357 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2358 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
825e2d87 2359 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
3915c3b5 2360 { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" },
6521148c 2361 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
825e2d87 2362 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
6521148c 2363 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
825e2d87 2364 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
6521148c 2365 { ATA_CMD_PMP_READ, "READ BUFFER" },
3915c3b5 2366 { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" },
6521148c 2367 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
3915c3b5 2368 { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" },
6521148c
RH
2369 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2370 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2371 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2372 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2373 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2374 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2375 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2376 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2377 { ATA_CMD_SMART, "SMART" },
2378 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2379 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
acad7627 2380 { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
825e2d87
HR
2381 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2382 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
6521148c
RH
2383 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2384 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2385 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
825e2d87 2386 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
3915c3b5
RH
2387 { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" },
2388 { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" },
28a3fc22 2389 { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" },
27708a95 2390 { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" },
6521148c
RH
2391 { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2392 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2393 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2394 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2395 { ATA_CMD_RESTORE, "RECALIBRATE" },
2396 { 0, NULL } /* terminate list */
2397 };
2398
2399 unsigned int i;
2400 for (i = 0; cmd_descr[i].text; i++)
2401 if (cmd_descr[i].command == command)
2402 return cmd_descr[i].text;
2403#endif
2404
2405 return NULL;
2406}
36aae28e 2407EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
6521148c 2408
9b1e2658
TH
/**
 *	ata_eh_link_report - report error handling to user
 *	@link: ATA link EH is going on
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_queued_cmd *qc;
	const char *frozen, *desc;
	char tries_buf[6] = "";
	int tag, nr_failed = 0;

	/* nothing to say if EH was asked to be quiet */
	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	/* count failed commands on this link worth reporting */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	/* show remaining EH tries if some have already been consumed */
	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf), " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			    ehc->i.err_mask, link->sactive, ehc->i.serror,
			    ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_err(ehc->i.dev, "%s\n", desc);
	} else {
		ata_link_err(link, "exception Emask 0x%x "
			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			     ehc->i.err_mask, link->sactive, ehc->i.serror,
			     ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_err(link, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	/* decode individual SError bits */
	if (ehc->i.serror)
		ata_link_err(link,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	/* per-command reporting: command TF, result TF and error decode */
	ata_qc_for_each_raw(ap, qc, tag) {
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			const char *prot_str = NULL;

			switch (qc->tf.protocol) {
			case ATA_PROT_UNKNOWN:
				prot_str = "unknown";
				break;
			case ATA_PROT_NODATA:
				prot_str = "nodata";
				break;
			case ATA_PROT_PIO:
				prot_str = "pio";
				break;
			case ATA_PROT_DMA:
				prot_str = "dma";
				break;
			case ATA_PROT_NCQ:
				prot_str = "ncq dma";
				break;
			case ATA_PROT_NCQ_NODATA:
				prot_str = "ncq nodata";
				break;
			case ATAPI_PROT_NODATA:
				prot_str = "nodata";
				break;
			case ATAPI_PROT_PIO:
				prot_str = "pio";
				break;
			case ATAPI_PROT_DMA:
				prot_str = "dma";
				break;
			}
			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str, qc->nbytes, dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			const u8 *cdb = qc->cdb;
			size_t cdb_len = qc->dev->cdb_len;

			/* prefer the SCSI command's CDB if one exists */
			if (qc->scsicmd) {
				cdb = qc->scsicmd->cmnd;
				cdb_len = qc->scsicmd->cmd_len;
			}
			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
					      cdb, cdb_len);
		} else {
			const char *descr = ata_get_cmd_descript(cmd->command);
			if (descr)
				ata_dev_err(qc->dev, "failed command: %s\n",
					    descr);
		}

		ata_dev_err(qc->dev,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s\n         %s"
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		/* decode the status and error registers of the result TF */
		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				    ATA_SENSE | ATA_ERR)) {
			if (res->command & ATA_BUSY)
				ata_dev_err(qc->dev, "status: { Busy }\n");
			else
				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
				  res->command & ATA_DRDY ? "DRDY " : "",
				  res->command & ATA_DF ? "DF " : "",
				  res->command & ATA_DRQ ? "DRQ " : "",
				  res->command & ATA_SENSE ? "SENSE " : "",
				  res->command & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
				     ATA_IDNF | ATA_ABORTED)))
			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
			  res->feature & ATA_ICRC ? "ICRC " : "",
			  res->feature & ATA_UNC ? "UNC " : "",
			  res->feature & ATA_AMNF ? "AMNF " : "",
			  res->feature & ATA_IDNF ? "IDNF " : "",
			  res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}
2606
9b1e2658
TH
/**
 *	ata_eh_report - report error handling to user
 *	@ap: ATA port to report EH about
 *
 *	Report EH to user.  Iterates over every link of @ap, host link
 *	first (HOST_FIRST), delegating to ata_eh_link_report().
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}
2623
/*
 * Run @reset on @link, optionally wiping the cached device classes
 * first so the reset's classification result is authoritative.
 * Shared call site helper used by ata_eh_reset() for both the master
 * and the slave link.
 */
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}
2636
e8411fba 2637static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
664faf09 2638{
45db2f6c 2639 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
ae791c05 2640 return 0;
5dbfc9cb
TH
2641 if (rc == -EAGAIN)
2642 return 1;
071f44b1 2643 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
3495de73 2644 return 1;
664faf09
TH
2645 return 0;
2646}
2647
fb7fd614
TH
2648int ata_eh_reset(struct ata_link *link, int classify,
2649 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2650 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
022bdb07 2651{
afaa5c37 2652 struct ata_port *ap = link->ap;
b1c72916 2653 struct ata_link *slave = ap->slave_link;
936fd732 2654 struct ata_eh_context *ehc = &link->eh_context;
705d2014 2655 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
664faf09 2656 unsigned int *classes = ehc->classes;
416dc9ed 2657 unsigned int lflags = link->flags;
1cdaf534 2658 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
d8af0eb6 2659 int max_tries = 0, try = 0;
b1c72916 2660 struct ata_link *failed_link;
f58229f8 2661 struct ata_device *dev;
416dc9ed 2662 unsigned long deadline, now;
022bdb07 2663 ata_reset_fn_t reset;
afaa5c37 2664 unsigned long flags;
416dc9ed 2665 u32 sstatus;
b1c72916 2666 int nr_unknown, rc;
022bdb07 2667
932648b0
TH
2668 /*
2669 * Prepare to reset
2670 */
d8af0eb6
TH
2671 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2672 max_tries++;
ca6d43b0
DW
2673 if (link->flags & ATA_LFLAG_RST_ONCE)
2674 max_tries = 1;
05944bdf
TH
2675 if (link->flags & ATA_LFLAG_NO_HRST)
2676 hardreset = NULL;
2677 if (link->flags & ATA_LFLAG_NO_SRST)
2678 softreset = NULL;
d8af0eb6 2679
25985edc 2680 /* make sure each reset attempt is at least COOL_DOWN apart */
19b72321
TH
2681 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2682 now = jiffies;
2683 WARN_ON(time_after(ehc->last_reset, now));
2684 deadline = ata_deadline(ehc->last_reset,
2685 ATA_EH_RESET_COOL_DOWN);
2686 if (time_before(now, deadline))
2687 schedule_timeout_uninterruptible(deadline - now);
2688 }
0a2c0f56 2689
afaa5c37
TH
2690 spin_lock_irqsave(ap->lock, flags);
2691 ap->pflags |= ATA_PFLAG_RESETTING;
2692 spin_unlock_irqrestore(ap->lock, flags);
2693
cf480626 2694 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
13abf50d 2695
1eca4365 2696 ata_for_each_dev(dev, link, ALL) {
cdeab114
TH
2697 /* If we issue an SRST then an ATA drive (not ATAPI)
2698 * may change configuration and be in PIO0 timing. If
2699 * we do a hard reset (or are coming from power on)
2700 * this is true for ATA or ATAPI. Until we've set a
2701 * suitable controller mode we should not touch the
2702 * bus as we may be talking too fast.
2703 */
2704 dev->pio_mode = XFER_PIO_0;
5416912a 2705 dev->dma_mode = 0xff;
cdeab114
TH
2706
2707 /* If the controller has a pio mode setup function
2708 * then use it to set the chipset to rights. Don't
2709 * touch the DMA setup as that will be dealt with when
2710 * configuring devices.
2711 */
2712 if (ap->ops->set_piomode)
2713 ap->ops->set_piomode(ap, dev);
2714 }
2715
cf480626 2716 /* prefer hardreset */
932648b0 2717 reset = NULL;
cf480626
TH
2718 ehc->i.action &= ~ATA_EH_RESET;
2719 if (hardreset) {
2720 reset = hardreset;
a674050e 2721 ehc->i.action |= ATA_EH_HARDRESET;
4f7faa3f 2722 } else if (softreset) {
cf480626 2723 reset = softreset;
a674050e 2724 ehc->i.action |= ATA_EH_SOFTRESET;
cf480626 2725 }
f5914a46
TH
2726
2727 if (prereset) {
b1c72916
TH
2728 unsigned long deadline = ata_deadline(jiffies,
2729 ATA_EH_PRERESET_TIMEOUT);
2730
2731 if (slave) {
2732 sehc->i.action &= ~ATA_EH_RESET;
2733 sehc->i.action |= ehc->i.action;
2734 }
2735
2736 rc = prereset(link, deadline);
2737
2738 /* If present, do prereset on slave link too. Reset
2739 * is skipped iff both master and slave links report
2740 * -ENOENT or clear ATA_EH_RESET.
2741 */
2742 if (slave && (rc == 0 || rc == -ENOENT)) {
2743 int tmp;
2744
2745 tmp = prereset(slave, deadline);
2746 if (tmp != -ENOENT)
2747 rc = tmp;
2748
2749 ehc->i.action |= sehc->i.action;
2750 }
2751
f5914a46 2752 if (rc) {
c961922b 2753 if (rc == -ENOENT) {
a9a79dfe 2754 ata_link_dbg(link, "port disabled--ignoring\n");
cf480626 2755 ehc->i.action &= ~ATA_EH_RESET;
4aa9ab67 2756
1eca4365 2757 ata_for_each_dev(dev, link, ALL)
f58229f8 2758 classes[dev->devno] = ATA_DEV_NONE;
4aa9ab67
TH
2759
2760 rc = 0;
c961922b 2761 } else
a9a79dfe
JP
2762 ata_link_err(link,
2763 "prereset failed (errno=%d)\n",
2764 rc);
fccb6ea5 2765 goto out;
f5914a46 2766 }
f5914a46 2767
932648b0 2768 /* prereset() might have cleared ATA_EH_RESET. If so,
d6515e6f 2769 * bang classes, thaw and return.
932648b0
TH
2770 */
2771 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
1eca4365 2772 ata_for_each_dev(dev, link, ALL)
932648b0 2773 classes[dev->devno] = ATA_DEV_NONE;
d6515e6f
TH
2774 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2775 ata_is_host_link(link))
2776 ata_eh_thaw_port(ap);
932648b0
TH
2777 rc = 0;
2778 goto out;
2779 }
f5914a46
TH
2780 }
2781
022bdb07 2782 retry:
932648b0
TH
2783 /*
2784 * Perform reset
2785 */
dc98c32c
TH
2786 if (ata_is_host_link(link))
2787 ata_eh_freeze_port(ap);
2788
341c2c95 2789 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
31daabda 2790
932648b0
TH
2791 if (reset) {
2792 if (verbose)
a9a79dfe
JP
2793 ata_link_info(link, "%s resetting link\n",
2794 reset == softreset ? "soft" : "hard");
932648b0
TH
2795
2796 /* mark that this EH session started with reset */
19b72321 2797 ehc->last_reset = jiffies;
932648b0
TH
2798 if (reset == hardreset)
2799 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2800 else
2801 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
022bdb07 2802
b1c72916
TH
2803 rc = ata_do_reset(link, reset, classes, deadline, true);
2804 if (rc && rc != -EAGAIN) {
2805 failed_link = link;
5dbfc9cb 2806 goto fail;
b1c72916
TH
2807 }
2808
2809 /* hardreset slave link if existent */
2810 if (slave && reset == hardreset) {
2811 int tmp;
2812
2813 if (verbose)
a9a79dfe 2814 ata_link_info(slave, "hard resetting link\n");
b1c72916
TH
2815
2816 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2817 tmp = ata_do_reset(slave, reset, classes, deadline,
2818 false);
2819 switch (tmp) {
2820 case -EAGAIN:
2821 rc = -EAGAIN;
2822 case 0:
2823 break;
2824 default:
2825 failed_link = slave;
2826 rc = tmp;
2827 goto fail;
2828 }
2829 }
022bdb07 2830
b1c72916 2831 /* perform follow-up SRST if necessary */
932648b0 2832 if (reset == hardreset &&
e8411fba 2833 ata_eh_followup_srst_needed(link, rc)) {
932648b0 2834 reset = softreset;
022bdb07 2835
932648b0 2836 if (!reset) {
a9a79dfe
JP
2837 ata_link_err(link,
2838 "follow-up softreset required but no softreset available\n");
b1c72916 2839 failed_link = link;
932648b0
TH
2840 rc = -EINVAL;
2841 goto fail;
2842 }
664faf09 2843
932648b0 2844 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
b1c72916 2845 rc = ata_do_reset(link, reset, classes, deadline, true);
fe2c4d01
TH
2846 if (rc) {
2847 failed_link = link;
2848 goto fail;
2849 }
664faf09 2850 }
932648b0
TH
2851 } else {
2852 if (verbose)
a9a79dfe
JP
2853 ata_link_info(link,
2854 "no reset method available, skipping reset\n");
932648b0
TH
2855 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2856 lflags |= ATA_LFLAG_ASSUME_ATA;
664faf09
TH
2857 }
2858
932648b0
TH
2859 /*
2860 * Post-reset processing
2861 */
1eca4365 2862 ata_for_each_dev(dev, link, ALL) {
416dc9ed
TH
2863 /* After the reset, the device state is PIO 0 and the
2864 * controller state is undefined. Reset also wakes up
2865 * drives from sleeping mode.
2866 */
2867 dev->pio_mode = XFER_PIO_0;
2868 dev->flags &= ~ATA_DFLAG_SLEEPING;
31daabda 2869
3b761d3d
TH
2870 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2871 continue;
2872
2873 /* apply class override */
2874 if (lflags & ATA_LFLAG_ASSUME_ATA)
2875 classes[dev->devno] = ATA_DEV_ATA;
2876 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2877 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
022bdb07
TH
2878 }
2879
416dc9ed
TH
2880 /* record current link speed */
2881 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2882 link->sata_spd = (sstatus >> 4) & 0xf;
b1c72916
TH
2883 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2884 slave->sata_spd = (sstatus >> 4) & 0xf;
008a7896 2885
dc98c32c
TH
2886 /* thaw the port */
2887 if (ata_is_host_link(link))
2888 ata_eh_thaw_port(ap);
2889
f046519f
TH
2890 /* postreset() should clear hardware SError. Although SError
2891 * is cleared during link resume, clearing SError here is
2892 * necessary as some PHYs raise hotplug events after SRST.
2893 * This introduces race condition where hotplug occurs between
2894 * reset and here. This race is mediated by cross checking
2895 * link onlineness and classification result later.
2896 */
b1c72916 2897 if (postreset) {
416dc9ed 2898 postreset(link, classes);
b1c72916
TH
2899 if (slave)
2900 postreset(slave, classes);
2901 }
20952b69 2902
1e641060 2903 /*
8c56cacc
TH
2904 * Some controllers can't be frozen very well and may set spurious
2905 * error conditions during reset. Clear accumulated error
2906 * information and re-thaw the port if frozen. As reset is the
2907 * final recovery action and we cross check link onlineness against
2908 * device classification later, no hotplug event is lost by this.
1e641060 2909 */
f046519f 2910 spin_lock_irqsave(link->ap->lock, flags);
1e641060 2911 memset(&link->eh_info, 0, sizeof(link->eh_info));
b1c72916 2912 if (slave)
1e641060
TH
2913 memset(&slave->eh_info, 0, sizeof(link->eh_info));
2914 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
f046519f
TH
2915 spin_unlock_irqrestore(link->ap->lock, flags);
2916
8c56cacc
TH
2917 if (ap->pflags & ATA_PFLAG_FROZEN)
2918 ata_eh_thaw_port(ap);
2919
3b761d3d
TH
2920 /*
2921 * Make sure onlineness and classification result correspond.
f046519f
TH
2922 * Hotplug could have happened during reset and some
2923 * controllers fail to wait while a drive is spinning up after
2924 * being hotplugged causing misdetection. By cross checking
3b761d3d
TH
2925 * link on/offlineness and classification result, those
2926 * conditions can be reliably detected and retried.
f046519f 2927 */
b1c72916 2928 nr_unknown = 0;
1eca4365 2929 ata_for_each_dev(dev, link, ALL) {
3b761d3d
TH
2930 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2931 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
a9a79dfe 2932 ata_dev_dbg(dev, "link online but device misclassified\n");
3b761d3d 2933 classes[dev->devno] = ATA_DEV_NONE;
b1c72916 2934 nr_unknown++;
3b761d3d
TH
2935 }
2936 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2937 if (ata_class_enabled(classes[dev->devno]))
a9a79dfe
JP
2938 ata_dev_dbg(dev,
2939 "link offline, clearing class %d to NONE\n",
2940 classes[dev->devno]);
3b761d3d
TH
2941 classes[dev->devno] = ATA_DEV_NONE;
2942 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
a9a79dfe
JP
2943 ata_dev_dbg(dev,
2944 "link status unknown, clearing UNKNOWN to NONE\n");
3b761d3d 2945 classes[dev->devno] = ATA_DEV_NONE;
b1c72916 2946 }
f046519f
TH
2947 }
2948
b1c72916 2949 if (classify && nr_unknown) {
f046519f 2950 if (try < max_tries) {
a9a79dfe
JP
2951 ata_link_warn(link,
2952 "link online but %d devices misclassified, retrying\n",
2953 nr_unknown);
b1c72916 2954 failed_link = link;
f046519f
TH
2955 rc = -EAGAIN;
2956 goto fail;
2957 }
a9a79dfe
JP
2958 ata_link_warn(link,
2959 "link online but %d devices misclassified, "
2960 "device detection might fail\n", nr_unknown);
f046519f
TH
2961 }
2962
416dc9ed 2963 /* reset successful, schedule revalidation */
cf480626 2964 ata_eh_done(link, NULL, ATA_EH_RESET);
b1c72916
TH
2965 if (slave)
2966 ata_eh_done(slave, NULL, ATA_EH_RESET);
6b7ae954 2967 ehc->last_reset = jiffies; /* update to completion time */
416dc9ed 2968 ehc->i.action |= ATA_EH_REVALIDATE;
6b7ae954 2969 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */
ae791c05 2970
416dc9ed 2971 rc = 0;
fccb6ea5
TH
2972 out:
2973 /* clear hotplug flag */
2974 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
b1c72916
TH
2975 if (slave)
2976 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
afaa5c37
TH
2977
2978 spin_lock_irqsave(ap->lock, flags);
2979 ap->pflags &= ~ATA_PFLAG_RESETTING;
2980 spin_unlock_irqrestore(ap->lock, flags);
2981
022bdb07 2982 return rc;
416dc9ed
TH
2983
2984 fail:
5958e302
TH
2985 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2986 if (!ata_is_host_link(link) &&
2987 sata_scr_read(link, SCR_STATUS, &sstatus))
2988 rc = -ERESTART;
2989
7a46c078 2990 if (try >= max_tries) {
8ea7645c
TH
2991 /*
2992 * Thaw host port even if reset failed, so that the port
2993 * can be retried on the next phy event. This risks
2994 * repeated EH runs but seems to be a better tradeoff than
2995 * shutting down a port after a botched hotplug attempt.
2996 */
2997 if (ata_is_host_link(link))
2998 ata_eh_thaw_port(ap);
416dc9ed 2999 goto out;
8ea7645c 3000 }
416dc9ed
TH
3001
3002 now = jiffies;
3003 if (time_before(now, deadline)) {
3004 unsigned long delta = deadline - now;
3005
a9a79dfe 3006 ata_link_warn(failed_link,
0a2c0f56
TH
3007 "reset failed (errno=%d), retrying in %u secs\n",
3008 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
416dc9ed 3009
c0c362b6 3010 ata_eh_release(ap);
416dc9ed
TH
3011 while (delta)
3012 delta = schedule_timeout_uninterruptible(delta);
c0c362b6 3013 ata_eh_acquire(ap);
416dc9ed
TH
3014 }
3015
7a46c078
GG
3016 /*
3017 * While disks spinup behind PMP, some controllers fail sending SRST.
3018 * They need to be reset - as well as the PMP - before retrying.
3019 */
3020 if (rc == -ERESTART) {
3021 if (ata_is_host_link(link))
3022 ata_eh_thaw_port(ap);
3023 goto out;
3024 }
3025
b1c72916 3026 if (try == max_tries - 1) {
a07d499b 3027 sata_down_spd_limit(link, 0);
b1c72916 3028 if (slave)
a07d499b 3029 sata_down_spd_limit(slave, 0);
b1c72916 3030 } else if (rc == -EPIPE)
a07d499b 3031 sata_down_spd_limit(failed_link, 0);
b1c72916 3032
416dc9ed
TH
3033 if (hardreset)
3034 reset = hardreset;
3035 goto retry;
022bdb07
TH
3036}
3037
/*
 * ata_eh_pull_park_action - pull pending ATA_EH_PARK requests into eh_context
 * @ap: port whose links and devices are scanned
 *
 * Moves ATA_EH_PARK bits from each link's eh_info into its eh_context
 * under the host lock, and re-arms ap->park_req_pending so that a new
 * park request arriving later can be detected (see the long comment
 * below for the full protocol).
 */
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * This function can be thought of as an extended version of
	 * ata_eh_about_to_do() specially crafted to accommodate the
	 * requirements of ATA_EH_PARK handling. Since the EH thread
	 * does not leave the do {} while () loop in ata_eh_recover as
	 * long as the timeout for a park request to *one* device on
	 * the port has not expired, and since we still want to pick
	 * up park requests to other devices on the same port or
	 * timeout updates for the same device, we have to pull
	 * ATA_EH_PARK actions from eh_info into eh_context.i
	 * ourselves at the beginning of each pass over the loop.
	 *
	 * Additionally, all write accesses to &ap->park_req_pending
	 * through reinit_completion() (see below) or complete_all()
	 * (see ata_scsi_park_store()) are protected by the host lock.
	 * As a result we have that park_req_pending.done is zero on
	 * exit from this function, i.e. when ATA_EH_PARK actions for
	 * *all* devices on port ap have been pulled into the
	 * respective eh_context structs. If, and only if,
	 * park_req_pending.done is non-zero by the time we reach
	 * wait_for_completion_timeout(), another ATA_EH_PARK action
	 * has been scheduled for at least one of the devices on port
	 * ap and we have to cycle over the do {} while () loop in
	 * ata_eh_recover() again.
	 */

	spin_lock_irqsave(ap->lock, flags);
	/* re-arm under the host lock, per the protocol described above */
	reinit_completion(&ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			/* copy PARK bit into eh_context, then clear it
			 * from eh_info so it is not picked up twice */
			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
3083
3084static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
3085{
3086 struct ata_eh_context *ehc = &dev->link->eh_context;
3087 struct ata_taskfile tf;
3088 unsigned int err_mask;
3089
3090 ata_tf_init(dev, &tf);
3091 if (park) {
3092 ehc->unloaded_mask |= 1 << dev->devno;
3093 tf.command = ATA_CMD_IDLEIMMEDIATE;
3094 tf.feature = 0x44;
3095 tf.lbal = 0x4c;
3096 tf.lbam = 0x4e;
3097 tf.lbah = 0x55;
3098 } else {
3099 ehc->unloaded_mask &= ~(1 << dev->devno);
3100 tf.command = ATA_CMD_CHK_POWER;
3101 }
3102
3103 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
bd18bc04 3104 tf.protocol = ATA_PROT_NODATA;
45fabbb7
EO
3105 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3106 if (park && (err_mask || tf.lbal != 0xc4)) {
a9a79dfe 3107 ata_dev_err(dev, "head unload failed!\n");
45fabbb7
EO
3108 ehc->unloaded_mask &= ~(1 << dev->devno);
3109 }
3110}
3111
/*
 * ata_eh_revalidate_and_attach - revalidate known devices, attach new ones
 * @link: link whose devices are processed
 * @r_failed_dev: out parameter set to the failing device on error
 *
 * Pass 1 (reverse order): revalidate devices flagged ATA_EH_REVALIDATE
 * and IDENTIFY devices that became visible (e.g. after reset).
 * Pass 2 (forward order): run ata_dev_configure() on the newly found
 * devices collected in @new_mask.
 *
 * RETURNS:
 * 0 on success, -errno otherwise with *@r_failed_dev set.
 */
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;	/* devno bits of freshly identified devices */
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			/* can't revalidate a device whose phys link is down */
			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			schedule_work(&(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete.  This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error info accumulated during probe */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device.  No need to reset.  Just
				 * thaw and ignore the device.
				 */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		/* PMPs were fully attached in the first pass */
		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			/* configuration failed; undo the temporary class */
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}
3240
6f1d1e3a
TH
3241/**
3242 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3243 * @link: link on which timings will be programmed
98a1708d 3244 * @r_failed_dev: out parameter for failed device
6f1d1e3a
TH
3245 *
3246 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3247 * ata_set_mode() fails, pointer to the failing device is
3248 * returned in @r_failed_dev.
3249 *
3250 * LOCKING:
3251 * PCI/etc. bus probe sem.
3252 *
3253 * RETURNS:
3254 * 0 on success, negative errno otherwise
3255 */
3256int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3257{
3258 struct ata_port *ap = link->ap;
00115e0f
TH
3259 struct ata_device *dev;
3260 int rc;
6f1d1e3a 3261
76326ac1 3262 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
1eca4365 3263 ata_for_each_dev(dev, link, ENABLED) {
76326ac1
TH
3264 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3265 struct ata_ering_entry *ent;
3266
3267 ent = ata_ering_top(&dev->ering);
3268 if (ent)
3269 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3270 }
3271 }
3272
6f1d1e3a
TH
3273 /* has private set_mode? */
3274 if (ap->ops->set_mode)
00115e0f
TH
3275 rc = ap->ops->set_mode(link, r_failed_dev);
3276 else
3277 rc = ata_do_set_mode(link, r_failed_dev);
3278
3279 /* if transfer mode has changed, set DUBIOUS_XFER on device */
1eca4365 3280 ata_for_each_dev(dev, link, ENABLED) {
00115e0f
TH
3281 struct ata_eh_context *ehc = &link->eh_context;
3282 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3283 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3284
3285 if (dev->xfer_mode != saved_xfer_mode ||
3286 ata_ncq_enabled(dev) != saved_ncq)
3287 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3288 }
3289
3290 return rc;
6f1d1e3a
TH
3291}
3292
11fc33da
TH
3293/**
3294 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3295 * @dev: ATAPI device to clear UA for
3296 *
3297 * Resets and other operations can make an ATAPI device raise
3298 * UNIT ATTENTION which causes the next operation to fail. This
3299 * function clears UA.
3300 *
3301 * LOCKING:
3302 * EH context (may sleep).
3303 *
3304 * RETURNS:
3305 * 0 on success, -errno on failure.
3306 */
3307static int atapi_eh_clear_ua(struct ata_device *dev)
3308{
3309 int i;
3310
3311 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
b5357081 3312 u8 *sense_buffer = dev->link->ap->sector_buf;
11fc33da
TH
3313 u8 sense_key = 0;
3314 unsigned int err_mask;
3315
3316 err_mask = atapi_eh_tur(dev, &sense_key);
3317 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
a9a79dfe
JP
3318 ata_dev_warn(dev,
3319 "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3320 err_mask);
11fc33da
TH
3321 return -EIO;
3322 }
3323
3324 if (!err_mask || sense_key != UNIT_ATTENTION)
3325 return 0;
3326
3327 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3328 if (err_mask) {
a9a79dfe 3329 ata_dev_warn(dev, "failed to clear "
11fc33da
TH
3330 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3331 return -EIO;
3332 }
3333 }
3334
a9a79dfe
JP
3335 ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3336 ATA_EH_UA_TRIES);
11fc33da
TH
3337
3338 return 0;
3339}
3340
/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported upper layer
 *	immediately as it means that @dev failed to remap and already
 *	lost at least a sector and further FLUSH retrials won't make
 *	any difference to the lost sector.  However, if FLUSH failed
 *	for other reasons, for example transmission error, FLUSH needs
 *	to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	/* reissue the same FLUSH variant (EXT or not) the original qc used */
	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
		       tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
			       err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			/* device-level failure: EH can continue unless frozen */
			if (!(ap->pflags & ATA_PFLAG_FROZEN))
				rc = 0;
		}
	}
	return rc;
}
3416
/**
 *	ata_eh_set_lpm - configure SATA interface power management
 *	@link: link to configure power management
 *	@policy: the link power management policy
 *	@r_failed_dev: out parameter for failed device
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power and
 *	medium_power_with_dipm policies, and then call driver specific
 *	callbacks for enabling Host Initiated Power management.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev)
{
	/* ap is NULL for links behind a PMP (non-host links) */
	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
	enum ata_lpm_policy old_policy = link->lpm_policy;
	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
	/* hint bits are cleared below as devices disprove them */
	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
	unsigned int err_mask;
	int rc;

	/* if the link or host doesn't do LPM, noop */
	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
		return 0;

	/*
	 * DIPM is enabled only for MIN_POWER as some devices
	 * misbehave when the host NACKs transition to SLUMBER.  Order
	 * device and link configurations such that the host always
	 * allows DIPM requests.
	 */
	ata_for_each_dev(dev, link, ENABLED) {
		bool hipm = ata_id_has_hipm(dev->id);
		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;

		/* find the first enabled and LPM enabled devices */
		if (!link_dev)
			link_dev = dev;

		if (!lpm_dev && (hipm || dipm))
			lpm_dev = dev;

		hints &= ~ATA_LPM_EMPTY;
		if (!hipm)
			hints &= ~ATA_LPM_HIPM;

		/* disable DIPM before changing link config */
		if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_DISABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					     "failed to disable DIPM, Emask 0x%x\n",
					     err_mask);
				rc = -EIO;
				/* here 'dev' is the device whose SET FEATURES
				 * failed; the fail path reports it */
				goto fail;
			}
		}
	}

	if (ap) {
		rc = ap->ops->set_lpm(link, policy, hints);
		if (!rc && ap->slave_link)
			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
	} else
		rc = sata_pmp_set_lpm(link, policy, hints);

	/*
	 * Attribute link config failure to the first (LPM) enabled
	 * device on the link.
	 */
	if (rc) {
		if (rc == -EOPNOTSUPP) {
			link->flags |= ATA_LFLAG_NO_LPM;
			return 0;
		}
		dev = lpm_dev ? lpm_dev : link_dev;
		goto fail;
	}

	/*
	 * Low level driver acked the transition.  Issue DIPM command
	 * with the new policy set.
	 */
	link->lpm_policy = policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = policy;

	/* host config updated, enable DIPM if transitioning to MIN_POWER */
	ata_for_each_dev(dev, link, ENABLED) {
		if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
		    ata_id_has_dipm(dev->id)) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					"failed to enable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	link->last_lpm_change = jiffies;
	link->flags |= ATA_LFLAG_CHANGED;

	return 0;

fail:
	/* restore the old policy */
	link->lpm_policy = old_policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = old_policy;

	/* if no device or only one more chance is left, disable LPM */
	if (!dev || ehc->tries[dev->devno] <= 2) {
		ata_link_warn(link, "disabling LPM on the link\n");
		link->flags |= ATA_LFLAG_NO_LPM;
	}
	if (r_failed_dev)
		*r_failed_dev = dev;
	return rc;
}
3549
8a745f1f 3550int ata_link_nr_enabled(struct ata_link *link)
022bdb07 3551{
f58229f8
TH
3552 struct ata_device *dev;
3553 int cnt = 0;
022bdb07 3554
1eca4365
TH
3555 ata_for_each_dev(dev, link, ENABLED)
3556 cnt++;
022bdb07
TH
3557 return cnt;
3558}
3559
0260731f 3560static int ata_link_nr_vacant(struct ata_link *link)
084fe639 3561{
f58229f8
TH
3562 struct ata_device *dev;
3563 int cnt = 0;
084fe639 3564
1eca4365 3565 ata_for_each_dev(dev, link, ALL)
f58229f8 3566 if (dev->class == ATA_DEV_UNKNOWN)
084fe639
TH
3567 cnt++;
3568 return cnt;
3569}
3570
0260731f 3571static int ata_eh_skip_recovery(struct ata_link *link)
084fe639 3572{
672b2d65 3573 struct ata_port *ap = link->ap;
0260731f 3574 struct ata_eh_context *ehc = &link->eh_context;
f58229f8 3575 struct ata_device *dev;
084fe639 3576
f9df58cb
TH
3577 /* skip disabled links */
3578 if (link->flags & ATA_LFLAG_DISABLED)
3579 return 1;
3580
e2f3d75f
TH
3581 /* skip if explicitly requested */
3582 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3583 return 1;
3584
672b2d65
TH
3585 /* thaw frozen port and recover failed devices */
3586 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3587 return 0;
3588
3589 /* reset at least once if reset is requested */
3590 if ((ehc->i.action & ATA_EH_RESET) &&
3591 !(ehc->i.flags & ATA_EHI_DID_RESET))
084fe639
TH
3592 return 0;
3593
3594 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1eca4365 3595 ata_for_each_dev(dev, link, ALL) {
084fe639
TH
3596 if (dev->class == ATA_DEV_UNKNOWN &&
3597 ehc->classes[dev->devno] != ATA_DEV_NONE)
3598 return 0;
3599 }
3600
3601 return 1;
3602}
3603
c2c7a89c
TH
3604static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3605{
3606 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3607 u64 now = get_jiffies_64();
3608 int *trials = void_arg;
3609
6868225e
LM
3610 if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3611 (ent->timestamp < now - min(now, interval)))
c2c7a89c
TH
3612 return -1;
3613
3614 (*trials)++;
3615 return 0;
3616}
3617
/*
 * ata_eh_schedule_probe - schedule (re)probing of @dev if requested
 * @dev: device to consider for probing
 *
 * If a probe has been requested for @dev (probe_mask set) and not yet
 * attempted this EH run (did_probe_mask clear), detach and re-init the
 * device, request a reset, wake the link from LPM and account the
 * trial on the error ring.
 *
 * RETURNS:
 * 1 if a probe was scheduled, 0 otherwise.
 */
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	/* bail unless probing is requested and hasn't been tried yet */
	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	/* start from a clean device state and force a reset */
	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* the link maybe in a deep sleep, wake it up */
	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
		if (ata_is_host_link(link))
			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
					       ATA_LPM_EMPTY);
		else
			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
					 ATA_LPM_EMPTY);
	}

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
3666
/*
 * ata_eh_handle_dev_fail - handle one failed recovery attempt for @dev
 * @dev: device whose recovery failed
 * @err: errno returned by the failed recovery action
 *
 * Consumes one recovery try (except for -EAGAIN), applies per-errno
 * policy (probe on -ENODEV, one last chance on -EINVAL, speed-down on
 * the final -EIO try) and, when the tries are exhausted, disables the
 * device and possibly schedules a fresh probe.
 *
 * RETURNS:
 * 1 if the device was disabled (and possibly rescheduled for probe),
 * 0 if recovery should be retried with ATA_EH_RESET requested.
 */
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fall through */
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			/* fresh probe gets a full set of tries and a
			 * clean per-command timeout history */
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
3718
022bdb07
TH
3719/**
3720 * ata_eh_recover - recover host port after error
3721 * @ap: host port to recover
f5914a46 3722 * @prereset: prereset method (can be NULL)
022bdb07
TH
3723 * @softreset: softreset method (can be NULL)
3724 * @hardreset: hardreset method (can be NULL)
3725 * @postreset: postreset method (can be NULL)
9b1e2658 3726 * @r_failed_link: out parameter for failed link
022bdb07
TH
3727 *
3728 * This is the alpha and omega, eum and yang, heart and soul of
3729 * libata exception handling. On entry, actions required to
9b1e2658
TH
3730 * recover each link and hotplug requests are recorded in the
3731 * link's eh_context. This function executes all the operations
3732 * with appropriate retrials and fallbacks to resurrect failed
084fe639 3733 * devices, detach goners and greet newcomers.
022bdb07
TH
3734 *
3735 * LOCKING:
3736 * Kernel thread context (may sleep).
3737 *
3738 * RETURNS:
3739 * 0 on success, -errno on failure.
3740 */
fb7fd614
TH
3741int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3742 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3743 ata_postreset_fn_t postreset,
3744 struct ata_link **r_failed_link)
022bdb07 3745{
9b1e2658 3746 struct ata_link *link;
022bdb07 3747 struct ata_device *dev;
6b7ae954 3748 int rc, nr_fails;
45fabbb7 3749 unsigned long flags, deadline;
022bdb07
TH
3750
3751 DPRINTK("ENTER\n");
3752
3753 /* prep for recovery */
1eca4365 3754 ata_for_each_link(link, ap, EDGE) {
9b1e2658 3755 struct ata_eh_context *ehc = &link->eh_context;
084fe639 3756
f9df58cb
TH
3757 /* re-enable link? */
3758 if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3759 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3760 spin_lock_irqsave(ap->lock, flags);
3761 link->flags &= ~ATA_LFLAG_DISABLED;
3762 spin_unlock_irqrestore(ap->lock, flags);
3763 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3764 }
3765
1eca4365 3766 ata_for_each_dev(dev, link, ALL) {
fd995f70
TH
3767 if (link->flags & ATA_LFLAG_NO_RETRY)
3768 ehc->tries[dev->devno] = 1;
3769 else
3770 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
084fe639 3771
9b1e2658
TH
3772 /* collect port action mask recorded in dev actions */
3773 ehc->i.action |= ehc->i.dev_action[dev->devno] &
3774 ~ATA_EH_PERDEV_MASK;
3775 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3776
3777 /* process hotplug request */
3778 if (dev->flags & ATA_DFLAG_DETACH)
3779 ata_eh_detach_dev(dev);
3780
02c05a27
TH
3781 /* schedule probe if necessary */
3782 if (!ata_dev_enabled(dev))
3783 ata_eh_schedule_probe(dev);
084fe639 3784 }
022bdb07
TH
3785 }
3786
3787 retry:
022bdb07
TH
3788 rc = 0;
3789
aeb2ecd6 3790 /* if UNLOADING, finish immediately */
b51e9e5d 3791 if (ap->pflags & ATA_PFLAG_UNLOADING)
aeb2ecd6
TH
3792 goto out;
3793
9b1e2658 3794 /* prep for EH */
1eca4365 3795 ata_for_each_link(link, ap, EDGE) {
9b1e2658 3796 struct ata_eh_context *ehc = &link->eh_context;
022bdb07 3797
9b1e2658
TH
3798 /* skip EH if possible. */
3799 if (ata_eh_skip_recovery(link))
3800 ehc->i.action = 0;
3801
1eca4365 3802 ata_for_each_dev(dev, link, ALL)
9b1e2658
TH
3803 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3804 }
084fe639 3805
022bdb07 3806 /* reset */
1eca4365 3807 ata_for_each_link(link, ap, EDGE) {
dc98c32c 3808 struct ata_eh_context *ehc = &link->eh_context;
9b1e2658 3809
dc98c32c
TH
3810 if (!(ehc->i.action & ATA_EH_RESET))
3811 continue;
9b1e2658 3812
dc98c32c
TH
3813 rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3814 prereset, softreset, hardreset, postreset);
3815 if (rc) {
a9a79dfe 3816 ata_link_err(link, "reset failed, giving up\n");
dc98c32c 3817 goto out;
022bdb07 3818 }
022bdb07
TH
3819 }
3820
45fabbb7
EO
3821 do {
3822 unsigned long now;
3823
3824 /*
3825 * clears ATA_EH_PARK in eh_info and resets
3826 * ap->park_req_pending
3827 */
3828 ata_eh_pull_park_action(ap);
3829
3830 deadline = jiffies;
1eca4365
TH
3831 ata_for_each_link(link, ap, EDGE) {
3832 ata_for_each_dev(dev, link, ALL) {
45fabbb7
EO
3833 struct ata_eh_context *ehc = &link->eh_context;
3834 unsigned long tmp;
3835
9162c657
HR
3836 if (dev->class != ATA_DEV_ATA &&
3837 dev->class != ATA_DEV_ZAC)
45fabbb7
EO
3838 continue;
3839 if (!(ehc->i.dev_action[dev->devno] &
3840 ATA_EH_PARK))
3841 continue;
3842 tmp = dev->unpark_deadline;
3843 if (time_before(deadline, tmp))
3844 deadline = tmp;
3845 else if (time_before_eq(tmp, jiffies))
3846 continue;
3847 if (ehc->unloaded_mask & (1 << dev->devno))
3848 continue;
3849
3850 ata_eh_park_issue_cmd(dev, 1);
3851 }
3852 }
3853
3854 now = jiffies;
3855 if (time_before_eq(deadline, now))
3856 break;
3857
c0c362b6 3858 ata_eh_release(ap);
45fabbb7
EO
3859 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3860 deadline - now);
c0c362b6 3861 ata_eh_acquire(ap);
45fabbb7 3862 } while (deadline);
1eca4365
TH
3863 ata_for_each_link(link, ap, EDGE) {
3864 ata_for_each_dev(dev, link, ALL) {
45fabbb7
EO
3865 if (!(link->eh_context.unloaded_mask &
3866 (1 << dev->devno)))
3867 continue;
3868
3869 ata_eh_park_issue_cmd(dev, 0);
3870 ata_eh_done(link, dev, ATA_EH_PARK);
3871 }
3872 }
3873
9b1e2658 3874 /* the rest */
6b7ae954
TH
3875 nr_fails = 0;
3876 ata_for_each_link(link, ap, PMP_FIRST) {
9b1e2658 3877 struct ata_eh_context *ehc = &link->eh_context;
022bdb07 3878
6b7ae954
TH
3879 if (sata_pmp_attached(ap) && ata_is_host_link(link))
3880 goto config_lpm;
3881
9b1e2658
TH
3882 /* revalidate existing devices and attach new ones */
3883 rc = ata_eh_revalidate_and_attach(link, &dev);
4ae72a1e 3884 if (rc)
6b7ae954 3885 goto rest_fail;
022bdb07 3886
633273a3
TH
3887 /* if PMP got attached, return, pmp EH will take care of it */
3888 if (link->device->class == ATA_DEV_PMP) {
3889 ehc->i.action = 0;
3890 return 0;
3891 }
3892
9b1e2658
TH
3893 /* configure transfer mode if necessary */
3894 if (ehc->i.flags & ATA_EHI_SETMODE) {
3895 rc = ata_set_mode(link, &dev);
3896 if (rc)
6b7ae954 3897 goto rest_fail;
9b1e2658
TH
3898 ehc->i.flags &= ~ATA_EHI_SETMODE;
3899 }
3900
11fc33da
TH
3901 /* If reset has been issued, clear UA to avoid
3902 * disrupting the current users of the device.
3903 */
3904 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1eca4365 3905 ata_for_each_dev(dev, link, ALL) {
11fc33da
TH
3906 if (dev->class != ATA_DEV_ATAPI)
3907 continue;
3908 rc = atapi_eh_clear_ua(dev);
3909 if (rc)
6b7ae954 3910 goto rest_fail;
21334205
AL
3911 if (zpodd_dev_enabled(dev))
3912 zpodd_post_poweron(dev);
11fc33da
TH
3913 }
3914 }
3915
6013efd8
TH
3916 /* retry flush if necessary */
3917 ata_for_each_dev(dev, link, ALL) {
9162c657
HR
3918 if (dev->class != ATA_DEV_ATA &&
3919 dev->class != ATA_DEV_ZAC)
6013efd8
TH
3920 continue;
3921 rc = ata_eh_maybe_retry_flush(dev);
3922 if (rc)
6b7ae954 3923 goto rest_fail;
6013efd8
TH
3924 }
3925
6b7ae954 3926 config_lpm:
11fc33da 3927 /* configure link power saving */
6b7ae954
TH
3928 if (link->lpm_policy != ap->target_lpm_policy) {
3929 rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
3930 if (rc)
3931 goto rest_fail;
3932 }
ca77329f 3933
9b1e2658
TH
3934 /* this link is okay now */
3935 ehc->i.flags = 0;
3936 continue;
022bdb07 3937
6b7ae954
TH
3938 rest_fail:
3939 nr_fails++;
3940 if (dev)
3941 ata_eh_handle_dev_fail(dev, rc);
022bdb07 3942
b06ce3e5
TH
3943 if (ap->pflags & ATA_PFLAG_FROZEN) {
3944 /* PMP reset requires working host port.
3945 * Can't retry if it's frozen.
3946 */
071f44b1 3947 if (sata_pmp_attached(ap))
b06ce3e5 3948 goto out;
9b1e2658 3949 break;
b06ce3e5 3950 }
022bdb07
TH
3951 }
3952
6b7ae954 3953 if (nr_fails)
9b1e2658 3954 goto retry;
022bdb07 3955
9b1e2658
TH
3956 out:
3957 if (rc && r_failed_link)
3958 *r_failed_link = link;
3959
022bdb07
TH
3960 DPRINTK("EXIT, rc=%d\n", rc);
3961 return rc;
3962}
3963
3964/**
3965 * ata_eh_finish - finish up EH
3966 * @ap: host port to finish EH for
3967 *
3968 * Recovery is complete. Clean up EH states and retry or finish
3969 * failed qcs.
3970 *
3971 * LOCKING:
3972 * None.
3973 */
fb7fd614 3974void ata_eh_finish(struct ata_port *ap)
022bdb07 3975{
258c4e5c 3976 struct ata_queued_cmd *qc;
022bdb07
TH
3977 int tag;
3978
3979 /* retry or finish qcs */
258c4e5c 3980 ata_qc_for_each_raw(ap, qc, tag) {
022bdb07
TH
3981 if (!(qc->flags & ATA_QCFLAG_FAILED))
3982 continue;
3983
3984 if (qc->err_mask) {
3985 /* FIXME: Once EH migration is complete,
3986 * generate sense data in this function,
3987 * considering both err_mask and tf.
3988 */
03faab78 3989 if (qc->flags & ATA_QCFLAG_RETRY)
022bdb07 3990 ata_eh_qc_retry(qc);
03faab78
TH
3991 else
3992 ata_eh_qc_complete(qc);
022bdb07
TH
3993 } else {
3994 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3995 ata_eh_qc_complete(qc);
3996 } else {
3997 /* feed zero TF to sense generation */
3998 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3999 ata_eh_qc_retry(qc);
4000 }
4001 }
4002 }
da917d69
TH
4003
4004 /* make sure nr_active_links is zero after EH */
4005 WARN_ON(ap->nr_active_links);
4006 ap->nr_active_links = 0;
022bdb07
TH
4007}
4008
4009/**
4010 * ata_do_eh - do standard error handling
4011 * @ap: host port to handle error for
a1efdaba 4012 *
f5914a46 4013 * @prereset: prereset method (can be NULL)
022bdb07
TH
4014 * @softreset: softreset method (can be NULL)
4015 * @hardreset: hardreset method (can be NULL)
4016 * @postreset: postreset method (can be NULL)
4017 *
4018 * Perform standard error handling sequence.
4019 *
4020 * LOCKING:
4021 * Kernel thread context (may sleep).
4022 */
f5914a46
TH
4023void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
4024 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
4025 ata_postreset_fn_t postreset)
022bdb07 4026{
9b1e2658
TH
4027 struct ata_device *dev;
4028 int rc;
4029
4030 ata_eh_autopsy(ap);
4031 ata_eh_report(ap);
4032
4033 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
4034 NULL);
4035 if (rc) {
1eca4365 4036 ata_for_each_dev(dev, &ap->link, ALL)
9b1e2658
TH
4037 ata_dev_disable(dev);
4038 }
4039
022bdb07
TH
4040 ata_eh_finish(ap);
4041}
500530f6 4042
a1efdaba
TH
4043/**
4044 * ata_std_error_handler - standard error handler
4045 * @ap: host port to handle error for
4046 *
4047 * Standard error handler
4048 *
4049 * LOCKING:
4050 * Kernel thread context (may sleep).
4051 */
4052void ata_std_error_handler(struct ata_port *ap)
4053{
4054 struct ata_port_operations *ops = ap->ops;
4055 ata_reset_fn_t hardreset = ops->hardreset;
4056
57c9efdf 4057 /* ignore built-in hardreset if SCR access is not available */
fe06e5f9 4058 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
a1efdaba
TH
4059 hardreset = NULL;
4060
4061 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4062}
4063
6ffa01d8 4064#ifdef CONFIG_PM
500530f6
TH
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;
	struct ata_device *dev;

	/* are we suspending?  Bail out unless a PM request is pending
	 * and it is not a resume request.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event & PM_EVENT_RESUME) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* suspending an already-suspended port indicates a PM state bug */
	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/*
	 * If we have a ZPODD attached, check its zero
	 * power ready status before the port is frozen.
	 * Only needed for runtime suspend.
	 */
	if (PMSG_IS_AUTO(ap->pm_mesg)) {
		ata_for_each_dev(dev, &ap->link, ENABLED) {
			if (zpodd_dev_enabled(dev))
				zpodd_on_suspend(dev);
		}
	}

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend: freeze the port first so no new commands are taken,
	 * then hand off to the driver's port_suspend method if any
	 */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, ap->pm_mesg);
 out:
	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		/* suspend failed with the port frozen; schedule EH so it
		 * can be thawed and recovered
		 */
		ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}
4129
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* are we resuming?  Bail out unless a PM request is pending
	 * and it is a resume request.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* resuming a port that was never suspended indicates a PM state bug */
	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended and PHY events during resume isn't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	/* move ACPI to the resume power state before touching hardware */
	ata_acpi_set_state(ap, ap->pm_mesg);

	if (ap->ops->port_resume)
		ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	spin_unlock_irqrestore(ap->lock, flags);
}
6ffa01d8 4180#endif /* CONFIG_PM */