drivers/ata/libata-eh.c
/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
        /* speed down verdicts */
        ATA_EH_SPDN_NCQ_OFF = (1 << 0),
        ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
        ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
        ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),

        /* error flags */
        ATA_EFLAG_IS_IO = (1 << 0),
        ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
        ATA_EFLAG_OLD_ER = (1 << 31),

        /* error categories */
        ATA_ECAT_NONE = 0,
        ATA_ECAT_ATA_BUS = 1,
        ATA_ECAT_TOUT_HSM = 2,
        ATA_ECAT_UNK_DEV = 3,
        ATA_ECAT_DUBIOUS_NONE = 4,
        ATA_ECAT_DUBIOUS_ATA_BUS = 5,
        ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
        ATA_ECAT_DUBIOUS_UNK_DEV = 7,
        ATA_ECAT_NR = 8,

        ATA_EH_CMD_DFL_TIMEOUT = 5000,

        /* always put at least this amount of time between resets */
        ATA_EH_RESET_COOL_DOWN = 5000,

        /* Waiting in ->prereset can never be reliable.  It's
         * sometimes nice to wait there but it can't be depended upon;
         * otherwise, we wouldn't be resetting.  Just give it enough
         * time for most drives to spin up.
         */
        ATA_EH_PRERESET_TIMEOUT = 10000,
        ATA_EH_FASTDRAIN_INTERVAL = 3000,

        ATA_EH_UA_TRIES = 5,

        /* probe speed down parameters, see ata_eh_schedule_probe() */
        ATA_EH_PROBE_TRIAL_INTERVAL = 60000,    /* 1 min */
        ATA_EH_PROBE_TRIALS = 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
        10000,          /* most drives spin up by 10sec */
        10000,          /* > 99% working drives spin up before 20sec */
        35000,          /* give > 30 secs of idleness for retarded devices */
        5000,           /* and sweet one last chance */
        ULONG_MAX,      /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
        5000,           /* covers > 99% of successes and not too boring on failures */
        10000,          /* combined time till here is enough even for media access */
        30000,          /* for true idiots */
        ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
        15000,          /* be generous with flush */
        15000,          /* ditto */
        30000,          /* and even more generous */
        ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
        5000,           /* same rationale as identify timeout */
        10000,          /* ditto */
        /* but no merciful 30sec for other commands, it just isn't worth it */
        ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
        const u8 *commands;
        const unsigned long *timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)   (const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
        { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
          .timeouts = ata_eh_identify_timeouts, },
        { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_SET_FEATURES),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
          .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
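
/*
 * Editor's illustration (not part of the original file): with the
 * tables above, an IDENTIFY that keeps timing out is retried with
 * escalating timeouts:
 *
 *      try 1: ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA) ->  5000ms
 *      try 2: (after ata_internal_cmd_timed_out())          -> 10000ms
 *      try 3:                                               -> 30000ms
 *
 * Further tries keep using 30000ms; the index never advances onto the
 * ULONG_MAX terminator.
 */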

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
                                 va_list args)
{
        ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
                                    ATA_EH_DESC_LEN - ehi->desc_len,
                                    fmt, args);
}

/**
 *      __ata_ehi_push_desc - push error description without adding separator
 *      @ehi: target EHI
 *      @fmt: printf format string
 *
 *      Format string according to @fmt and append it to @ehi->desc.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        __ata_ehi_pushv_desc(ehi, fmt, args);
        va_end(args);
}

/**
 *      ata_ehi_push_desc - push error description with separator
 *      @ehi: target EHI
 *      @fmt: printf format string
 *
 *      Format string according to @fmt and append it to @ehi->desc.
 *      If @ehi->desc is not empty, ", " is added in-between.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
        va_list args;

        if (ehi->desc_len)
                __ata_ehi_push_desc(ehi, ", ");

        va_start(args, fmt);
        __ata_ehi_pushv_desc(ehi, fmt, args);
        va_end(args);
}
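
/*
 * Usage sketch (editor's addition, illustrative only; "irq_stat" is a
 * hypothetical variable): building an error description incrementally
 * under the host lock:
 *
 *      ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *      ata_ehi_push_desc(ehi, "interface fatal error");
 *
 * The first call appends directly; the second is separated by ", "
 * because @ehi->desc is no longer empty.
 */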

/**
 *      ata_ehi_clear_desc - clean error description
 *      @ehi: target EHI
 *
 *      Clear @ehi->desc.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
        ehi->desc[0] = '\0';
        ehi->desc_len = 0;
}

/**
 *      ata_port_desc - append port description
 *      @ap: target ATA port
 *      @fmt: printf format string
 *
 *      Format string according to @fmt and append it to port
 *      description.  If port description is not empty, " " is added
 *      in-between.  This function is to be used while initializing
 *      ata_host.  The description is printed on host registration.
 *
 *      LOCKING:
 *      None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
        va_list args;

        WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

        if (ap->link.eh_info.desc_len)
                __ata_ehi_push_desc(&ap->link.eh_info, " ");

        va_start(args, fmt);
        __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
        va_end(args);
}

#ifdef CONFIG_PCI

/**
 *      ata_port_pbar_desc - append PCI BAR description
 *      @ap: target ATA port
 *      @bar: target PCI BAR
 *      @offset: offset into PCI BAR
 *      @name: name of the area
 *
 *      If @offset is negative, this function formats a string which
 *      contains the name, address, size and type of the BAR and
 *      appends it to the port description.  If @offset is zero or
 *      positive, only the name and the offset address are appended.
 *
 *      LOCKING:
 *      None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
                        const char *name)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        char *type = "";
        unsigned long long start, len;

        if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
                type = "m";
        else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
                type = "i";

        start = (unsigned long long)pci_resource_start(pdev, bar);
        len = (unsigned long long)pci_resource_len(pdev, bar);

        if (offset < 0)
                ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
        else
                ata_port_desc(ap, "%s 0x%llx", name,
                              start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
        int i;

        for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
                const u8 *cur;

                for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
                        if (*cur == cmd)
                                return i;
        }

        return -1;
}

/**
 *      ata_internal_cmd_timeout - determine timeout for an internal command
 *      @dev: target device
 *      @cmd: internal command to be issued
 *
 *      Determine timeout for internal command @cmd for @dev.
 *
 *      LOCKING:
 *      EH context.
 *
 *      RETURNS:
 *      Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        int ent = ata_lookup_timeout_table(cmd);
        int idx;

        if (ent < 0)
                return ATA_EH_CMD_DFL_TIMEOUT;

        idx = ehc->cmd_timeout_idx[dev->devno][ent];
        return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *      ata_internal_cmd_timed_out - notification for internal command timeout
 *      @dev: target device
 *      @cmd: internal command which timed out
 *
 *      Notify EH that internal command @cmd for @dev timed out.  This
 *      function should be called only for commands whose timeouts are
 *      determined using ata_internal_cmd_timeout().
 *
 *      LOCKING:
 *      EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        int ent = ata_lookup_timeout_table(cmd);
        int idx;

        if (ent < 0)
                return;

        idx = ehc->cmd_timeout_idx[dev->devno][ent];
        if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
                ehc->cmd_timeout_idx[dev->devno][ent]++;
}
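
/*
 * Editor's sketch of how the two helpers above pair up in the caller
 * (ata_exec_internal() in libata-core.c does roughly this):
 *
 *      timeout = ata_internal_cmd_timeout(dev, cmd);
 *      ... issue @cmd and wait up to @timeout ...
 *      if (err_mask & AC_ERR_TIMEOUT)
 *              ata_internal_cmd_timed_out(dev, cmd);
 *
 * so that the next attempt of the same command class picks the next,
 * longer entry from its timeout table.
 */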

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
                             unsigned int err_mask)
{
        struct ata_ering_entry *ent;

        WARN_ON(!err_mask);

        ering->cursor++;
        ering->cursor %= ATA_ERING_SIZE;

        ent = &ering->ring[ering->cursor];
        ent->eflags = eflags;
        ent->err_mask = err_mask;
        ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
        struct ata_ering_entry *ent = &ering->ring[ering->cursor];

        if (ent->err_mask)
                return ent;
        return NULL;
}

int ata_ering_map(struct ata_ering *ering,
                  int (*map_fn)(struct ata_ering_entry *, void *),
                  void *arg)
{
        int idx, rc = 0;
        struct ata_ering_entry *ent;

        idx = ering->cursor;
        do {
                ent = &ering->ring[idx];
                if (!ent->err_mask)
                        break;
                rc = map_fn(ent, arg);
                if (rc)
                        break;
                idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
        } while (idx != ering->cursor);

        return rc;
}

int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
        ent->eflags |= ATA_EFLAG_OLD_ER;
        return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
        ata_ering_map(ering, ata_ering_clear_cb, NULL);
}
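
/*
 * Editor's example (hypothetical callback, not in the original file):
 * ata_ering_map() visits entries from the most recent backwards and
 * stops early on a non-zero return, so a simple counter looks like:
 *
 *      static int count_timeouts(struct ata_ering_entry *ent, void *arg)
 *      {
 *              int *nr = arg;
 *
 *              if (ent->err_mask & AC_ERR_TIMEOUT)
 *                      (*nr)++;
 *              return 0;
 *      }
 *
 * speed_down_verdict_cb() further below is the real in-tree user of
 * this pattern.
 */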

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;

        return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
                                struct ata_eh_info *ehi, unsigned int action)
{
        struct ata_device *tdev;

        if (!dev) {
                ehi->action &= ~action;
                ata_for_each_dev(tdev, link, ALL)
                        ehi->dev_action[tdev->devno] &= ~action;
        } else {
                /* doesn't make sense for port-wide EH actions */
                WARN_ON(!(action & ATA_EH_PERDEV_MASK));

                /* break ehi->action into ehi->dev_action */
                if (ehi->action & action) {
                        ata_for_each_dev(tdev, link, ALL)
                                ehi->dev_action[tdev->devno] |=
                                        ehi->action & action;
                        ehi->action &= ~action;
                }

                /* turn off the specified per-dev action */
                ehi->dev_action[dev->devno] &= ~action;
        }
}

/**
 *      ata_scsi_timed_out - SCSI layer time out callback
 *      @cmd: timed out SCSI command
 *
 *      Handles SCSI layer timeout.  We race with normal completion of
 *      the qc for @cmd.  If the qc is already gone, we lose and let
 *      the scsi command finish (BLK_EH_HANDLED).  Otherwise, the qc has
 *      timed out and EH should be invoked.  Prevent ata_qc_complete()
 *      from finishing it by setting ATA_QCFLAG_EH_SCHEDULED and return
 *      BLK_EH_NOT_HANDLED.
 *
 *      TODO: kill this function once old EH is gone.
 *
 *      LOCKING:
 *      Called from timer context.
 *
 *      RETURNS:
 *      BLK_EH_HANDLED or BLK_EH_NOT_HANDLED.
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct ata_port *ap = ata_shost_to_port(host);
        unsigned long flags;
        struct ata_queued_cmd *qc;
        enum blk_eh_timer_return ret;

        DPRINTK("ENTER\n");

        if (ap->ops->error_handler) {
                ret = BLK_EH_NOT_HANDLED;
                goto out;
        }

        ret = BLK_EH_HANDLED;
        spin_lock_irqsave(ap->lock, flags);
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
        if (qc) {
                WARN_ON(qc->scsicmd != cmd);
                qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
                qc->err_mask |= AC_ERR_TIMEOUT;
                ret = BLK_EH_NOT_HANDLED;
        }
        spin_unlock_irqrestore(ap->lock, flags);

 out:
        DPRINTK("EXIT, ret=%d\n", ret);
        return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
        struct ata_link *link;
        struct ata_device *dev;
        unsigned long flags;

        /* Restore SControl IPM and SPD for the next driver and
         * disable attached devices.
         */
        ata_for_each_link(link, ap, PMP_FIRST) {
                sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
                ata_for_each_dev(dev, link, ALL)
                        ata_dev_disable(dev);
        }

        /* freeze and set UNLOADED */
        spin_lock_irqsave(ap->lock, flags);

        ata_port_freeze(ap);                    /* won't be thawed */
        ap->pflags &= ~ATA_PFLAG_EH_PENDING;    /* clear pending from freeze */
        ap->pflags |= ATA_PFLAG_UNLOADED;

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_scsi_error - SCSI layer error handler callback
 *      @host: SCSI host on which error occurred
 *
 *      Handles SCSI-layer-thrown error events.
 *
 *      LOCKING:
 *      Inherited from SCSI layer (none, can sleep)
 *
 *      RETURNS:
 *      Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
        struct ata_port *ap = ata_shost_to_port(host);
        int i;
        unsigned long flags;

        DPRINTK("ENTER\n");

        /* make sure sff pio task is not running */
        ata_sff_flush_pio_task(ap);

        /* synchronize with host lock and sort out timeouts */

        /* For new EH, all qcs are finished in one of three ways -
         * normal completion, error completion, and SCSI timeout.
         * Both completions can race against SCSI timeout.  When normal
         * completion wins, the qc never reaches EH.  When error
         * completion wins, the qc has ATA_QCFLAG_FAILED set.
         *
         * When SCSI timeout wins, things are a bit more complex.
         * Normal or error completion can occur after the timeout but
         * before this point.  In such cases, both types of
         * completions are honored.  A scmd is determined to have
         * timed out iff its associated qc is active and not failed.
         */
        if (ap->ops->error_handler) {
                struct scsi_cmnd *scmd, *tmp;
                int nr_timedout = 0;

                spin_lock_irqsave(ap->lock, flags);

                /* This must occur under the ap->lock as we don't want
                 * a polled recovery to race the real interrupt handler.
                 *
                 * The lost_interrupt handler checks for any completed but
                 * non-notified command and completes much like an IRQ handler.
                 *
                 * We then fall into the error recovery code which will treat
                 * this as if normal completion won the race.
                 */
                if (ap->ops->lost_interrupt)
                        ap->ops->lost_interrupt(ap);

                list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
                        struct ata_queued_cmd *qc;

                        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                                qc = __ata_qc_from_tag(ap, i);
                                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                                    qc->scsicmd == scmd)
                                        break;
                        }

                        if (i < ATA_MAX_QUEUE) {
                                /* the scmd has an associated qc */
                                if (!(qc->flags & ATA_QCFLAG_FAILED)) {
                                        /* which hasn't failed yet, timeout */
                                        qc->err_mask |= AC_ERR_TIMEOUT;
                                        qc->flags |= ATA_QCFLAG_FAILED;
                                        nr_timedout++;
                                }
                        } else {
                                /* Normal completion occurred after
                                 * SCSI timeout but before this point.
                                 * Successfully complete it.
                                 */
                                scmd->retries = scmd->allowed;
                                scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
                        }
                }

                /* If we have timed out qcs, they belong to EH from
                 * this point but the state of the controller is
                 * unknown.  Freeze the port to make sure the IRQ
                 * handler doesn't diddle with those qcs.  This must
                 * be done atomically w.r.t. setting QCFLAG_FAILED.
                 */
                if (nr_timedout)
                        __ata_port_freeze(ap);

                spin_unlock_irqrestore(ap->lock, flags);

                /* initialize eh_tries */
                ap->eh_tries = ATA_EH_MAX_TRIES;
        } else
                spin_unlock_wait(ap->lock);

        /* If we raced normal completion and there is nothing to
         * recover (nr_timedout == 0), why exactly are we doing error
         * recovery?
         */

 repeat:
        /* invoke error handler */
        if (ap->ops->error_handler) {
                struct ata_link *link;

                /* kill fast drain timer */
                del_timer_sync(&ap->fastdrain_timer);

                /* process port resume request */
                ata_eh_handle_port_resume(ap);

                /* fetch & clear EH info */
                spin_lock_irqsave(ap->lock, flags);

                ata_for_each_link(link, ap, HOST_FIRST) {
                        struct ata_eh_context *ehc = &link->eh_context;
                        struct ata_device *dev;

                        memset(&link->eh_context, 0, sizeof(link->eh_context));
                        link->eh_context.i = link->eh_info;
                        memset(&link->eh_info, 0, sizeof(link->eh_info));

                        ata_for_each_dev(dev, link, ENABLED) {
                                int devno = dev->devno;

                                ehc->saved_xfer_mode[devno] = dev->xfer_mode;
                                if (ata_ncq_enabled(dev))
                                        ehc->saved_ncq_enabled |= 1 << devno;
                        }
                }

                ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
                ap->pflags &= ~ATA_PFLAG_EH_PENDING;
                ap->excl_link = NULL;   /* don't maintain exclusion over EH */

                spin_unlock_irqrestore(ap->lock, flags);

                /* invoke EH, skip if unloading or suspended */
                if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
                        ap->ops->error_handler(ap);
                else {
                        /* if unloading, commence suicide */
                        if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
                            !(ap->pflags & ATA_PFLAG_UNLOADED))
                                ata_eh_unload(ap);
                        ata_eh_finish(ap);
                }

                /* process port suspend request */
                ata_eh_handle_port_suspend(ap);

                /* Exception might have happened after ->error_handler
                 * recovered the port but before this point.  Repeat
                 * EH in such case.
                 */
                spin_lock_irqsave(ap->lock, flags);

                if (ap->pflags & ATA_PFLAG_EH_PENDING) {
                        if (--ap->eh_tries) {
                                spin_unlock_irqrestore(ap->lock, flags);
                                goto repeat;
                        }
                        ata_port_printk(ap, KERN_ERR, "EH pending after %d "
                                        "tries, giving up\n", ATA_EH_MAX_TRIES);
                        ap->pflags &= ~ATA_PFLAG_EH_PENDING;
                }

                /* this run is complete, make sure EH info is clear */
                ata_for_each_link(link, ap, HOST_FIRST)
                        memset(&link->eh_info, 0, sizeof(link->eh_info));

                /* Clear host_eh_scheduled while holding ap->lock such
                 * that if exception occurs after this point but
                 * before EH completion, SCSI midlayer will
                 * re-initiate EH.
                 */
                host->host_eh_scheduled = 0;

                spin_unlock_irqrestore(ap->lock, flags);
        } else {
                WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
                ap->ops->eng_timeout(ap);
        }

        /* finish or retry handled scmd's and clean up */
        WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

        scsi_eh_flush_done_q(&ap->eh_done_q);

        /* clean up */
        spin_lock_irqsave(ap->lock, flags);

        if (ap->pflags & ATA_PFLAG_LOADING)
                ap->pflags &= ~ATA_PFLAG_LOADING;
        else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
                schedule_delayed_work(&ap->hotplug_task, 0);

        if (ap->pflags & ATA_PFLAG_RECOVERED)
                ata_port_printk(ap, KERN_INFO, "EH complete\n");

        ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

        /* tell wait_eh that we're done */
        ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
        wake_up_all(&ap->eh_wait_q);

        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("EXIT\n");
}

/**
 *      ata_port_wait_eh - Wait for the currently pending EH to complete
 *      @ap: Port to wait EH for
 *
 *      Wait until the currently pending EH is complete.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
        unsigned long flags;
        DEFINE_WAIT(wait);

 retry:
        spin_lock_irqsave(ap->lock, flags);

        while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
                prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(ap->lock, flags);
                schedule();
                spin_lock_irqsave(ap->lock, flags);
        }
        finish_wait(&ap->eh_wait_q, &wait);

        spin_unlock_irqrestore(ap->lock, flags);

        /* make sure SCSI EH is complete */
        if (scsi_host_in_recovery(ap->scsi_host)) {
                msleep(10);
                goto retry;
        }
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
        unsigned int tag;
        int nr = 0;

        /* count only non-internal commands */
        for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
                if (ata_qc_from_tag(ap, tag))
                        nr++;

        return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
        struct ata_port *ap = (void *)arg;
        unsigned long flags;
        int cnt;

        spin_lock_irqsave(ap->lock, flags);

        cnt = ata_eh_nr_in_flight(ap);

        /* are we done? */
        if (!cnt)
                goto out_unlock;

        if (cnt == ap->fastdrain_cnt) {
                unsigned int tag;

                /* No progress during the last interval, tag all
                 * in-flight qcs as timed out and freeze the port.
                 */
                for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
                        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
                        if (qc)
                                qc->err_mask |= AC_ERR_TIMEOUT;
                }

                ata_port_freeze(ap);
        } else {
                /* some qcs have finished, give it another chance */
                ap->fastdrain_cnt = cnt;
                ap->fastdrain_timer.expires =
                        ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
                add_timer(&ap->fastdrain_timer);
        }

 out_unlock:
        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *      @ap: target ATA port
 *      @fastdrain: activate fast drain
 *
 *      Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *      is non-zero and EH wasn't pending before.  Fast drain ensures
 *      that EH kicks in in a timely manner.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
        int cnt;

        /* already scheduled? */
        if (ap->pflags & ATA_PFLAG_EH_PENDING)
                return;

        ap->pflags |= ATA_PFLAG_EH_PENDING;

        if (!fastdrain)
                return;

        /* do we have in-flight qcs? */
        cnt = ata_eh_nr_in_flight(ap);
        if (!cnt)
                return;

        /* activate fast drain */
        ap->fastdrain_cnt = cnt;
        ap->fastdrain_timer.expires =
                ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
        add_timer(&ap->fastdrain_timer);
}

/**
 *      ata_qc_schedule_eh - schedule qc for error handling
 *      @qc: command to schedule error handling for
 *
 *      Schedule error handling for @qc.  EH will kick in as soon as
 *      other commands are drained.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct request_queue *q = qc->scsicmd->device->request_queue;
        unsigned long flags;

        WARN_ON(!ap->ops->error_handler);

        qc->flags |= ATA_QCFLAG_FAILED;
        ata_eh_set_pending(ap, 1);

        /* The following will fail if timeout has already expired.
         * ata_scsi_error() takes care of such scmds on EH entry.
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_abort_request(qc->scsicmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 *      ata_port_schedule_eh - schedule error handling without a qc
 *      @ap: ATA port to schedule EH for
 *
 *      Schedule error handling for @ap.  EH will kick in as soon as
 *      all commands are drained.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        if (ap->pflags & ATA_PFLAG_INITIALIZING)
                return;

        ata_eh_set_pending(ap, 1);
        scsi_schedule_eh(ap->scsi_host);

        DPRINTK("port EH scheduled\n");
}
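
/*
 * Usage sketch (editor's addition, illustrative only): a driver that
 * detects an asynchronous failure typically records a description and
 * schedules EH under the host lock, then may wait from sleepable
 * context:
 *
 *      spin_lock_irqsave(ap->lock, flags);
 *      ata_ehi_push_desc(&ap->link.eh_info, "controller fault");
 *      ata_port_schedule_eh(ap);
 *      spin_unlock_irqrestore(ap->lock, flags);
 *
 *      ata_port_wait_eh(ap);   /* may sleep, never from IRQ context */
 */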

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
        int tag, nr_aborted = 0;

        WARN_ON(!ap->ops->error_handler);

        /* we're gonna abort all commands, no need for fast drain */
        ata_eh_set_pending(ap, 0);

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

                if (qc && (!link || qc->dev->link == link)) {
                        qc->flags |= ATA_QCFLAG_FAILED;
                        ata_qc_complete(qc);
                        nr_aborted++;
                }
        }

        if (!nr_aborted)
                ata_port_schedule_eh(ap);

        return nr_aborted;
}

/**
 *      ata_link_abort - abort all qc's on the link
 *      @link: ATA link to abort qc's for
 *
 *      Abort all active qc's on @link and schedule EH.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
        return ata_do_link_abort(link->ap, link);
}

/**
 *      ata_port_abort - abort all qc's on the port
 *      @ap: ATA port to abort qc's for
 *
 *      Abort all active qc's of @ap and schedule EH.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
        return ata_do_link_abort(ap, NULL);
}

/**
 *      __ata_port_freeze - freeze port
 *      @ap: ATA port to freeze
 *
 *      This function is called when HSM violation or some other
 *      condition disrupts normal operation of the port.  A frozen port
 *      is not allowed to perform any operation until the port is
 *      thawed, which usually follows a successful reset.
 *
 *      ap->ops->freeze() callback can be used for freezing the port
 *      hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *      port cannot be frozen hardware-wise, the interrupt handler
 *      must ack and clear interrupts unconditionally while the port
 *      is frozen.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        if (ap->ops->freeze)
                ap->ops->freeze(ap);

        ap->pflags |= ATA_PFLAG_FROZEN;

        DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *      ata_port_freeze - abort & freeze port
 *      @ap: ATA port to freeze
 *
 *      Abort and freeze @ap.  The freeze operation must be called
 *      first, because some hardware requires special operations
 *      before the taskfile registers are accessible.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
        int nr_aborted;

        WARN_ON(!ap->ops->error_handler);

        __ata_port_freeze(ap);
        nr_aborted = ata_port_abort(ap);

        return nr_aborted;
}
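
/*
 * Editor's illustration (hedged sketch, not from the original file): a
 * typical LLDD reaction to a fatal interrupt-time error is to mark the
 * active qc and freeze:
 *
 *      qc->err_mask |= AC_ERR_HSM;
 *      ata_port_freeze(ap);    /* freezes, then aborts qcs into EH */
 *
 * EH later thaws the port via ata_eh_thaw_port() after a successful
 * reset.
 */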

/**
 *      sata_async_notification - SATA async notification handler
 *      @ap: ATA port where async notification is received
 *
 *      Handler to be called when async notification via SDB FIS is
 *      received.  This function schedules EH if necessary.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
        u32 sntf;
        int rc;

        if (!(ap->flags & ATA_FLAG_AN))
                return 0;

        rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
        if (rc == 0)
                sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

        if (!sata_pmp_attached(ap) || rc) {
                /* PMP is not attached or SNTF is not available */
                if (!sata_pmp_attached(ap)) {
                        /* PMP is not attached.  Check whether ATAPI
                         * AN is configured.  If so, notify media
                         * change.
                         */
                        struct ata_device *dev = ap->link.device;

                        if ((dev->class == ATA_DEV_ATAPI) &&
                            (dev->flags & ATA_DFLAG_AN))
                                ata_scsi_media_change_notify(dev);
                        return 0;
                } else {
                        /* PMP is attached but SNTF is not available.
                         * ATAPI async media change notification is
                         * not used.  The PMP must be reporting PHY
                         * status change, schedule EH.
                         */
                        ata_port_schedule_eh(ap);
                        return 1;
                }
        } else {
                /* PMP is attached and SNTF is available */
                struct ata_link *link;

                /* check and notify ATAPI AN */
                ata_for_each_link(link, ap, EDGE) {
                        if (!(sntf & (1 << link->pmp)))
                                continue;

                        if ((link->device->class == ATA_DEV_ATAPI) &&
                            (link->device->flags & ATA_DFLAG_AN))
                                ata_scsi_media_change_notify(link->device);
                }

                /* If PMP is reporting that PHY status of some
                 * downstream ports has changed, schedule EH.
                 */
                if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
                        ata_port_schedule_eh(ap);
                        return 1;
                }

                return 0;
        }
}

/**
 *      ata_eh_freeze_port - EH helper to freeze port
 *      @ap: ATA port to freeze
 *
 *      Freeze @ap.
 *
 *      LOCKING:
 *      None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
        unsigned long flags;

        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);
        __ata_port_freeze(ap);
        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_eh_thaw_port - EH helper to thaw port
 *      @ap: ATA port to thaw
 *
 *      Thaw frozen port @ap.
 *
 *      LOCKING:
 *      None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
        unsigned long flags;

        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);

        ap->pflags &= ~ATA_PFLAG_FROZEN;

        if (ap->ops->thaw)
                ap->ops->thaw(ap);

        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
        /* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scsi_cmnd *scmd = qc->scsicmd;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);
        qc->scsidone = ata_eh_scsidone;
        __ata_qc_complete(qc);
        WARN_ON(ata_tag_valid(qc->tag));
        spin_unlock_irqrestore(ap->lock, flags);

        scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *      ata_eh_qc_complete - Complete an active ATA command from EH
 *      @qc: Command to complete
 *
 *      Indicate to the mid and upper layers that an ATA command has
 *      completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;
        scmd->retries = scmd->allowed;
        __ata_eh_qc_complete(qc);
}

/**
 *      ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *      @qc: Command to retry
 *
 *      Indicate to the mid and upper layers that an ATA command
 *      should be retried.  To be used from EH.
 *
 *      SCSI midlayer limits the number of retries to scmd->allowed.
 *      scmd->retries is decremented for commands which get retried
 *      due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;
        if (!qc->err_mask && scmd->retries)
                scmd->retries--;
        __ata_eh_qc_complete(qc);
}

/**
 *      ata_dev_disable - disable ATA device
 *      @dev: ATA device to disable
 *
 *      Disable @dev.
 *
 *      LOCKING:
 *      EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
        if (!ata_dev_enabled(dev))
                return;

        if (ata_msg_drv(dev->link->ap))
                ata_dev_printk(dev, KERN_WARNING, "disabled\n");
        ata_acpi_on_disable(dev);
        ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
        dev->class++;

        /* From now till the next successful probe, ering is used to
         * track probe failures.  Clear accumulated device error info.
         */
        ata_ering_clear(&dev->ering);
}

/**
 *      ata_eh_detach_dev - detach ATA device
 *      @dev: ATA device to detach
 *
 *      Detach @dev.
 *
 *      LOCKING:
 *      None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
        struct ata_link *link = dev->link;
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
        unsigned long flags;

        ata_dev_disable(dev);

        spin_lock_irqsave(ap->lock, flags);

        dev->flags &= ~ATA_DFLAG_DETACH;

        if (ata_scsi_offline_dev(dev)) {
                dev->flags |= ATA_DFLAG_DETACHED;
                ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
        }

        /* clear per-dev EH info */
        ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
        ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
        ehc->saved_xfer_mode[dev->devno] = 0;
        ehc->saved_ncq_enabled &= ~(1 << dev->devno);

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_eh_about_to_do - about to perform eh_action
 *      @link: target ATA link
 *      @dev: target ATA dev for per-dev action (can be NULL)
 *      @action: action about to be performed
 *
 *      Called just before performing EH actions to clear related bits
 *      in @link->eh_info such that eh actions are not unnecessarily
 *      repeated.
 *
 *      LOCKING:
 *      None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
                        unsigned int action)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_info *ehi = &link->eh_info;
        struct ata_eh_context *ehc = &link->eh_context;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);

        ata_eh_clear_action(link, dev, ehi, action);

        /* About to take EH action, set RECOVERED.  Ignore actions on
         * slave links as master will do them again.
         */
        if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
                ap->pflags |= ATA_PFLAG_RECOVERED;

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_eh_done - EH action complete
 *      @link: target ATA link
 *      @dev: target ATA dev for per-dev action (can be NULL)
 *      @action: action just completed
 *
 *      Called right after performing EH actions to clear related bits
 *      in @link->eh_context.
 *
 *      LOCKING:
 *      None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
                 unsigned int action)
{
        struct ata_eh_context *ehc = &link->eh_context;

        ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 *      ata_err_string - convert err_mask to descriptive string
 *      @err_mask: error mask to convert to string
 *
 *      Convert @err_mask to descriptive string.  Errors are
 *      prioritized according to severity and only the most severe
 *      error is reported.
 *
 *      LOCKING:
 *      None.
 *
 *      RETURNS:
 *      Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
        if (err_mask & AC_ERR_HOST_BUS)
                return "host bus error";
        if (err_mask & AC_ERR_ATA_BUS)
                return "ATA bus error";
        if (err_mask & AC_ERR_TIMEOUT)
                return "timeout";
        if (err_mask & AC_ERR_HSM)
                return "HSM violation";
        if (err_mask & AC_ERR_SYSTEM)
                return "internal error";
        if (err_mask & AC_ERR_MEDIA)
                return "media error";
        if (err_mask & AC_ERR_INVALID)
                return "invalid argument";
        if (err_mask & AC_ERR_DEV)
                return "device error";
        return "unknown error";
}

/**
 *      ata_read_log_page - read a specific log page
 *      @dev: target device
 *      @page: page to read
 *      @buf: buffer to store read page
 *      @sectors: number of sectors to read
 *
 *      Read log page using READ_LOG_EXT command.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
                                      u8 page, void *buf, unsigned int sectors)
{
        struct ata_taskfile tf;
        unsigned int err_mask;

        DPRINTK("read log page - page %d\n", page);

        ata_tf_init(dev, &tf);
        tf.command = ATA_CMD_READ_LOG_EXT;
        tf.lbal = page;
        tf.nsect = sectors;
        tf.hob_nsect = sectors >> 8;
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_PIO;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     buf, sectors * ATA_SECT_SIZE, 0);

        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
}

/**
 *      ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *      @dev: Device to read log page 10h from
 *      @tag: Resulting tag of the failed command
 *      @tf: Resulting taskfile registers of the failed command
 *
 *      Read log page 10h to obtain NCQ error details and clear error
 *      condition.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
                               int *tag, struct ata_taskfile *tf)
{
        u8 *buf = dev->link->ap->sector_buf;
        unsigned int err_mask;
        u8 csum;
        int i;

        err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
        if (err_mask)
                return -EIO;

        csum = 0;
        for (i = 0; i < ATA_SECT_SIZE; i++)
                csum += buf[i];
        if (csum)
                ata_dev_printk(dev, KERN_WARNING,
                               "invalid checksum 0x%x on log page 10h\n", csum);

        if (buf[0] & 0x80)
                return -ENOENT;

        *tag = buf[0] & 0x1f;

        tf->command = buf[2];
        tf->feature = buf[3];
        tf->lbal = buf[4];
        tf->lbam = buf[5];
        tf->lbah = buf[6];
        tf->device = buf[7];
        tf->hob_lbal = buf[8];
        tf->hob_lbam = buf[9];
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];

        return 0;
}

/**
 *      atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *      @dev: target ATAPI device
 *      @r_sense_key: out parameter for sense_key
 *
 *      Perform ATAPI TEST_UNIT_READY.
 *
 *      LOCKING:
 *      EH context (may sleep).
 *
 *      RETURNS:
 *      0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
        u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
        struct ata_taskfile tf;
        unsigned int err_mask;

        ata_tf_init(dev, &tf);

        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;
        tf.protocol = ATAPI_PROT_NODATA;

        err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
        if (err_mask == AC_ERR_DEV)
                *r_sense_key = tf.feature >> 4;
        return err_mask;
}

/**
 *      atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *      @dev: device to perform REQUEST_SENSE to
 *      @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *      @dfl_sense_key: default sense key to use
 *
 *      Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *      SENSE.  This function is an EH helper.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
                                           u8 *sense_buf, u8 dfl_sense_key)
{
        u8 cdb[ATAPI_CDB_LEN] =
                { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
        struct ata_port *ap = dev->link->ap;
        struct ata_taskfile tf;

        DPRINTK("ATAPI request sense\n");

        /* FIXME: is this needed? */
        memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

        /* initialize sense_buf with the error register,
         * for the case where they are -not- overwritten
         */
        sense_buf[0] = 0x70;
        sense_buf[2] = dfl_sense_key;

        /* some devices time out if garbage left in tf */
        ata_tf_init(dev, &tf);

        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;

        /* is it pointless to prefer PIO for "safety reasons"? */
        if (ap->flags & ATA_FLAG_PIO_DMA) {
                tf.protocol = ATAPI_PROT_DMA;
                tf.feature |= ATAPI_PKT_DMA;
        } else {
                tf.protocol = ATAPI_PROT_PIO;
                tf.lbam = SCSI_SENSE_BUFFERSIZE;
                tf.lbah = 0;
        }

        return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
                                 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 *      ata_eh_analyze_serror - analyze SError for a failed port
 *      @link: ATA link to analyze SError for
 *
 *      Analyze SError if available and further determine cause of
 *      failure.
 *
 *      LOCKING:
 *      None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
        struct ata_eh_context *ehc = &link->eh_context;
        u32 serror = ehc->i.serror;
        unsigned int err_mask = 0, action = 0;
        u32 hotplug_mask;

        if (serror & (SERR_PERSISTENT | SERR_DATA)) {
                err_mask |= AC_ERR_ATA_BUS;
                action |= ATA_EH_RESET;
        }
        if (serror & SERR_PROTOCOL) {
                err_mask |= AC_ERR_HSM;
                action |= ATA_EH_RESET;
        }
        if (serror & SERR_INTERNAL) {
                err_mask |= AC_ERR_SYSTEM;
                action |= ATA_EH_RESET;
        }

        /* Determine whether a hotplug event has occurred.  Both
         * SError.N/X are considered hotplug events for enabled or
         * host links.  For disabled PMP links, only N bit is
         * considered as X bit is left at 1 for link plugging.
         */
        hotplug_mask = 0;

        if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
                hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
        else
                hotplug_mask = SERR_PHYRDY_CHG;

        if (serror & hotplug_mask)
                ata_ehi_hotplugged(&ehc->i);

        ehc->i.err_mask |= err_mask;
        ehc->i.action |= action;
}

/**
 *      ata_eh_analyze_ncq_error - analyze NCQ error
 *      @link: ATA link to analyze NCQ error for
 *
 *      Read log page 10h, determine the offending qc and acquire
 *      error status TF.  For NCQ device errors, all an LLDD has to do
 *      is set AC_ERR_DEV in ehi->err_mask.  This function takes
 *      care of the rest.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
        struct ata_device *dev = link->device;
        struct ata_queued_cmd *qc;
        struct ata_taskfile tf;
        int tag, rc;

        /* if frozen, we can't do much */
        if (ap->pflags & ATA_PFLAG_FROZEN)
                return;

        /* is it NCQ device error? */
        if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
                return;

        /* has LLDD analyzed already? */
        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;

                if (qc->err_mask)
                        return;
        }

        /* okay, this error is ours */
        memset(&tf, 0, sizeof(tf));
        rc = ata_eh_read_log_10h(dev, &tag, &tf);
        if (rc) {
                ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
                                "(errno=%d)\n", rc);
                return;
        }

        if (!(link->sactive & (1 << tag))) {
                ata_link_printk(link, KERN_ERR, "log page 10h reported "
                                "inactive tag %d\n", tag);
                return;
        }

        /* we've got the perpetrator, condemn it */
        qc = __ata_qc_from_tag(ap, tag);
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
        qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
        ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 *      ata_eh_analyze_tf - analyze taskfile of a failed qc
 *      @qc: qc to analyze
 *      @tf: Taskfile registers to analyze
 *
 *      Analyze taskfile of @qc and further determine cause of
 *      failure.  This function also requests ATAPI sense data if
 *      available.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                                      const struct ata_taskfile *tf)
{
        unsigned int tmp, action = 0;
        u8 stat = tf->command, err = tf->feature;

        if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
                qc->err_mask |= AC_ERR_HSM;
                return ATA_EH_RESET;
        }

        if (stat & (ATA_ERR | ATA_DF))
                qc->err_mask |= AC_ERR_DEV;
        else
                return 0;

        switch (qc->dev->class) {
        case ATA_DEV_ATA:
                if (err & ATA_ICRC)
                        qc->err_mask |= AC_ERR_ATA_BUS;
                if (err & ATA_UNC)
                        qc->err_mask |= AC_ERR_MEDIA;
                if (err & ATA_IDNF)
                        qc->err_mask |= AC_ERR_INVALID;
                break;

        case ATA_DEV_ATAPI:
                if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
                        tmp = atapi_eh_request_sense(qc->dev,
                                                qc->scsicmd->sense_buffer,
                                                qc->result_tf.feature >> 4);
                        if (!tmp) {
                                /* ATA_QCFLAG_SENSE_VALID is used to
                                 * tell atapi_qc_complete() that sense
                                 * data is already valid.
                                 *
                                 * TODO: interpret sense data and set
                                 * appropriate err_mask.
                                 */
                                qc->flags |= ATA_QCFLAG_SENSE_VALID;
                        } else
                                qc->err_mask |= tmp;
                }
        }

        if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
                action |= ATA_EH_RESET;

        return action;
}

static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
                                   int *xfer_ok)
{
        int base = 0;

        if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
                *xfer_ok = 1;

        if (!*xfer_ok)
                base = ATA_ECAT_DUBIOUS_NONE;

        if (err_mask & AC_ERR_ATA_BUS)
                return base + ATA_ECAT_ATA_BUS;

        if (err_mask & AC_ERR_TIMEOUT)
                return base + ATA_ECAT_TOUT_HSM;

        if (eflags & ATA_EFLAG_IS_IO) {
                if (err_mask & AC_ERR_HSM)
                        return base + ATA_ECAT_TOUT_HSM;
                if ((err_mask &
                     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
                        return base + ATA_ECAT_UNK_DEV;
        }

        return 0;
}
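
/*
 * Editor's worked example: an AC_ERR_TIMEOUT on an IO command issued
 * before any transfer was verified (*xfer_ok still 0) yields
 * ATA_ECAT_DUBIOUS_NONE + ATA_ECAT_TOUT_HSM == ATA_ECAT_DUBIOUS_TOUT_HSM.
 * This offset trick is why each DUBIOUS_* category sits exactly
 * ATA_ECAT_DUBIOUS_NONE entries after its verified counterpart in the
 * enum at the top of this file.
 */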

struct speed_down_verdict_arg {
        u64 since;
        int xfer_ok;
        int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
        struct speed_down_verdict_arg *arg = void_arg;
        int cat;

        if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
                return -1;

        cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
                                      &arg->xfer_ok);
        arg->nr_errors[cat]++;

        return 0;
}

/**
 *      ata_eh_speed_down_verdict - Determine speed down verdict
 *      @dev: Device of interest
 *
 *      This function examines error ring of @dev and determines
 *      whether NCQ needs to be turned off, transfer speed should be
 *      stepped down, or falling back to PIO is necessary.
 *
 *      ECAT_ATA_BUS    : ATA_BUS error for any command
 *
 *      ECAT_TOUT_HSM   : TIMEOUT for any command or HSM violation for
 *                        IO commands
 *
 *      ECAT_UNK_DEV    : Unknown DEV error for IO commands
 *
 *      ECAT_DUBIOUS_*  : Identical to above three but occurred while
 *                        data transfer hasn't been verified.
 *
 *      Verdicts are
 *
 *      NCQ_OFF         : Turn off NCQ.
 *
 *      SPEED_DOWN      : Speed down transfer speed but don't fall back
 *                        to PIO.
 *
 *      FALLBACK_TO_PIO : Fall back to PIO.
 *
 *      Even if multiple verdicts are returned, only one action is
 *      taken per error.  An action triggered by non-DUBIOUS errors
 *      clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *      This is to expedite speed down decisions right after device is
 *      initially configured.
 *
 *      The following are the speed down rules.  #1 and #2 deal with
 *      DUBIOUS errors.
 *
 *      1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *         occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *      2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *         occurred during last 5 mins, NCQ_OFF.
 *
 *      3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *         occurred during last 5 mins, FALLBACK_TO_PIO.
 *
 *      4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *         during last 10 mins, NCQ_OFF.
 *
 *      5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *         UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *      LOCKING:
 *      Inherited from caller.
 *
 *      RETURNS:
 *      OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
        const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
        u64 j64 = get_jiffies_64();
        struct speed_down_verdict_arg arg;
        unsigned int verdict = 0;

        /* scan past 5 mins of error history */
        memset(&arg, 0, sizeof(arg));
        arg.since = j64 - min(j64, j5mins);
        ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

        if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
            arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
                verdict |= ATA_EH_SPDN_SPEED_DOWN |
                        ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

        if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
            arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
                verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

        if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
            arg.nr_errors[ATA_ECAT_TOUT_HSM] +
            arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
                verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

        /* scan past 10 mins of error history */
        memset(&arg, 0, sizeof(arg));
        arg.since = j64 - min(j64, j10mins);
        ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

        if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
            arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
                verdict |= ATA_EH_SPDN_NCQ_OFF;

        if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
            arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
            arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
                verdict |= ATA_EH_SPDN_SPEED_DOWN;

        return verdict;
}
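
/*
 * Worked example (editor's note): two DUBIOUS_TOUT_HSM errors within
 * the last 5 minutes satisfy both rule #1 and rule #2 above, so the
 * verdict is SPEED_DOWN | FALLBACK_TO_PIO | NCQ_OFF with KEEP_ERRORS
 * set, i.e. the error ring is preserved for later decisions.
 */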

/**
 *      ata_eh_speed_down - record error and speed down if necessary
 *      @dev: Failed device
 *      @eflags: mask of ATA_EFLAG_* flags
 *      @err_mask: err_mask of the error
 *
 *      Record error and examine error history to determine whether
 *      adjusting transmission speed is necessary.  It also sets
 *      transmission limits appropriately if such adjustment is
 *      necessary.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
                                unsigned int eflags, unsigned int err_mask)
{
        struct ata_link *link = ata_dev_phys_link(dev);
        int xfer_ok = 0;
        unsigned int verdict;
        unsigned int action = 0;

        /* don't bother if Cat-0 error */
        if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
                return 0;

        /* record error and determine whether speed down is necessary */
        ata_ering_record(&dev->ering, eflags, err_mask);
        verdict = ata_eh_speed_down_verdict(dev);

        /* turn off NCQ? */
        if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
            (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
                           ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
                dev->flags |= ATA_DFLAG_NCQ_OFF;
                ata_dev_printk(dev, KERN_WARNING,
                               "NCQ disabled due to excessive errors\n");
                goto done;
        }

        /* speed down? */
        if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
                /* speed down SATA link speed if possible */
                if (sata_down_spd_limit(link, 0) == 0) {
                        action |= ATA_EH_RESET;
                        goto done;
                }

                /* lower transfer mode */
                if (dev->spdn_cnt < 2) {
                        static const int dma_dnxfer_sel[] =
                                { ATA_DNXFER_DMA, ATA_DNXFER_40C };
                        static const int pio_dnxfer_sel[] =
                                { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
                        int sel;

                        if (dev->xfer_shift != ATA_SHIFT_PIO)
                                sel = dma_dnxfer_sel[dev->spdn_cnt];
                        else
                                sel = pio_dnxfer_sel[dev->spdn_cnt];

                        dev->spdn_cnt++;

                        if (ata_down_xfermask_limit(dev, sel) == 0) {
                                action |= ATA_EH_RESET;
                                goto done;
                        }
                }
        }

        /* Fall back to PIO?  Slowing down to PIO is meaningless for
         * SATA ATA devices.  Consider it only for PATA and SATAPI.
         */
        if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
            (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
            (dev->xfer_shift != ATA_SHIFT_PIO)) {
                if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
                        dev->spdn_cnt = 0;
                        action |= ATA_EH_RESET;
                        goto done;
                }
        }

        return 0;
 done:
        /* device has been slowed down, blow error history */
        if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
                ata_ering_clear(&dev->ering);
        return action;
}

/**
 *      ata_eh_link_autopsy - analyze error and determine recovery action
 *      @link: host link to perform autopsy on
 *
 *      Analyze why @link failed and determine which recovery actions
 *      are needed.  This function also sets more detailed AC_ERR_*
 *      values and fills sense data for ATAPI CHECK SENSE.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
        struct ata_device *dev;
        unsigned int all_err_mask = 0, eflags = 0;
        int tag;
        u32 serror;
        int rc;

        DPRINTK("ENTER\n");

        if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
                return;

        /* obtain and analyze SError */
        rc = sata_scr_read(link, SCR_ERROR, &serror);
        if (rc == 0) {
                ehc->i.serror |= serror;
                ata_eh_analyze_serror(link);
        } else if (rc != -EOPNOTSUPP) {
                /* SError read failed, force reset and probing */
                ehc->i.probe_mask |= ATA_ALL_DEVICES;
                ehc->i.action |= ATA_EH_RESET;
                ehc->i.err_mask |= AC_ERR_OTHER;
        }

        /* analyze NCQ failure */
        ata_eh_analyze_ncq_error(link);

        /* any real error trumps AC_ERR_OTHER */
        if (ehc->i.err_mask & ~AC_ERR_OTHER)
                ehc->i.err_mask &= ~AC_ERR_OTHER;

        all_err_mask |= ehc->i.err_mask;

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
                    ata_dev_phys_link(qc->dev) != link)
                        continue;

                /* inherit upper level err_mask */
                qc->err_mask |= ehc->i.err_mask;

                /* analyze TF */
                ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

                /* DEV errors are probably spurious in case of ATA_BUS error */
                if (qc->err_mask & AC_ERR_ATA_BUS)
                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
                                          AC_ERR_INVALID);

                /* any real error trumps unknown error */
                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;

                /* SENSE_VALID trumps dev/unknown error and revalidation */
                if (qc->flags & ATA_QCFLAG_SENSE_VALID)
                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

                /* determine whether the command is worth retrying */
                if (qc->flags & ATA_QCFLAG_IO ||
                    (!(qc->err_mask & AC_ERR_INVALID) &&
                     qc->err_mask != AC_ERR_DEV))
                        qc->flags |= ATA_QCFLAG_RETRY;

                /* accumulate error info */
                ehc->i.dev = qc->dev;
                all_err_mask |= qc->err_mask;
                if (qc->flags & ATA_QCFLAG_IO)
                        eflags |= ATA_EFLAG_IS_IO;
        }

        /* enforce default EH actions */
        if (ap->pflags & ATA_PFLAG_FROZEN ||
            all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
                ehc->i.action |= ATA_EH_RESET;
        else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
                 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
                ehc->i.action |= ATA_EH_REVALIDATE;

        /* If we have offending qcs and the associated failed device,
         * perform per-dev EH action only on the offending device.
         */
        if (ehc->i.dev) {
                ehc->i.dev_action[ehc->i.dev->devno] |=
                        ehc->i.action & ATA_EH_PERDEV_MASK;
                ehc->i.action &= ~ATA_EH_PERDEV_MASK;
        }

        /* propagate timeout to host link */
        if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
                ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

        /* record error and consider speeding down */
        dev = ehc->i.dev;
        if (!dev && ((ata_link_max_devices(link) == 1 &&
                      ata_dev_enabled(link->device))))
                dev = link->device;

        if (dev) {
                if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
                        eflags |= ATA_EFLAG_DUBIOUS_XFER;
                ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
        }

        DPRINTK("EXIT\n");
}

/**
 *      ata_eh_autopsy - analyze error and determine recovery action
 *      @ap: host port to perform autopsy on
2094 *
2095 * Analyze all links of @ap and determine why they failed and
2096 * which recovery actions are needed.
2097 *
2098 * LOCKING:
2099 * Kernel thread context (may sleep).
2100 */
2101 void ata_eh_autopsy(struct ata_port *ap)
2102 {
2103 struct ata_link *link;
2104
2105 ata_for_each_link(link, ap, EDGE)
2106 ata_eh_link_autopsy(link);
2107
2108 /* Handle the frigging slave link. Autopsy is done similarly
2109 * but actions and flags are transferred over to the master
2110 * link and handled from there.
2111 */
2112 if (ap->slave_link) {
2113 struct ata_eh_context *mehc = &ap->link.eh_context;
2114 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2115
2116 /* transfer control flags from master to slave */
2117 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2118
2119 /* perform autopsy on the slave link */
2120 ata_eh_link_autopsy(ap->slave_link);
2121
2122 /* transfer actions from slave to master and clear slave */
2123 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2124 mehc->i.action |= sehc->i.action;
2125 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2126 mehc->i.flags |= sehc->i.flags;
2127 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2128 }
2129
2130 /* Autopsy of fanout ports can affect host link autopsy.
2131 * Perform host link autopsy last.
2132 */
2133 if (sata_pmp_attached(ap))
2134 ata_eh_link_autopsy(&ap->link);
2135 }
2136
2137 /**
2138 * ata_get_cmd_descript - get description for ATA command
2139 * @command: ATA command code to get description for
2140 *
2141 * Return a textual description of the given command, or NULL if the
2142 * command is not known.
2143 *
2144 * LOCKING:
2145 * None
2146 */
2147 const char *ata_get_cmd_descript(u8 command)
2148 {
2149 #ifdef CONFIG_ATA_VERBOSE_ERROR
2150 static const struct
2151 {
2152 u8 command;
2153 const char *text;
2154 } cmd_descr[] = {
2155 { ATA_CMD_DEV_RESET, "DEVICE RESET" },
2156 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2157 { ATA_CMD_STANDBY, "STANDBY" },
2158 { ATA_CMD_IDLE, "IDLE" },
2159 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2160 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
2161 { ATA_CMD_NOP, "NOP" },
2162 { ATA_CMD_FLUSH, "FLUSH CACHE" },
2163 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2164 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2165 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2166 { ATA_CMD_SERVICE, "SERVICE" },
2167 { ATA_CMD_READ, "READ DMA" },
2168 { ATA_CMD_READ_EXT, "READ DMA EXT" },
2169 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2170 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
2171 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
2172 { ATA_CMD_WRITE, "WRITE DMA" },
2173 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2174 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2175 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
2176 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2177 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2178 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2179 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2180 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2181 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2182 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2183 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2184 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2185 { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2186 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2187 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2188 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
2189 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
2190 { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2191 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2192 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2193 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2194 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2195 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2196 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2197 { ATA_CMD_SLEEP, "SLEEP" },
2198 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2199 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2200 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2201 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2202 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2203 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2204 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2205 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
2206 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
2207 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
2208 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
2209 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
2210 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
2211 { ATA_CMD_PMP_READ, "READ BUFFER" },
2212 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
2213 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2214 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2215 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2216 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2217 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2218 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2219 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2220 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2221 { ATA_CMD_SMART, "SMART" },
2222 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2223 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
2224 { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
2225 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2226 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2227 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2228 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2229 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
2230 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
2231 { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2232 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2233 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2234 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2235 { ATA_CMD_RESTORE, "RECALIBRATE" },
2236 { 0, NULL } /* terminate list */
2237 };
2238
2239 unsigned int i;
2240 for (i = 0; cmd_descr[i].text; i++)
2241 if (cmd_descr[i].command == command)
2242 return cmd_descr[i].text;
2243 #endif
2244
2245 return NULL;
2246 }
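
/*
 * Illustrative sketch, not part of the original source: a caller that
 * wants a human-readable command name should fall back to the raw opcode
 * when ata_get_cmd_descript() returns NULL (e.g. when
 * CONFIG_ATA_VERBOSE_ERROR is disabled).  The helper name below is
 * hypothetical.
 */
static inline void ata_eh_example_print_cmd(struct ata_device *dev, u8 command)
{
	const char *descr = ata_get_cmd_descript(command);

	if (descr)
		ata_dev_printk(dev, KERN_INFO, "command: %s\n", descr);
	else
		ata_dev_printk(dev, KERN_INFO, "command: 0x%02x\n", command);
}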
2247
2248 /**
2249 * ata_eh_link_report - report error handling to user
2250 * @link: ATA link EH is going on
2251 *
2252 * Report EH to user.
2253 *
2254 * LOCKING:
2255 * None.
2256 */
2257 static void ata_eh_link_report(struct ata_link *link)
2258 {
2259 struct ata_port *ap = link->ap;
2260 struct ata_eh_context *ehc = &link->eh_context;
2261 const char *frozen, *desc;
2262 char tries_buf[6];
2263 int tag, nr_failed = 0;
2264
2265 if (ehc->i.flags & ATA_EHI_QUIET)
2266 return;
2267
2268 desc = NULL;
2269 if (ehc->i.desc[0] != '\0')
2270 desc = ehc->i.desc;
2271
2272 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2273 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2274
2275 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2276 ata_dev_phys_link(qc->dev) != link ||
2277 ((qc->flags & ATA_QCFLAG_QUIET) &&
2278 qc->err_mask == AC_ERR_DEV))
2279 continue;
2280 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2281 continue;
2282
2283 nr_failed++;
2284 }
2285
2286 if (!nr_failed && !ehc->i.err_mask)
2287 return;
2288
2289 frozen = "";
2290 if (ap->pflags & ATA_PFLAG_FROZEN)
2291 frozen = " frozen";
2292
2293 memset(tries_buf, 0, sizeof(tries_buf));
2294 if (ap->eh_tries < ATA_EH_MAX_TRIES)
2295 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2296 ap->eh_tries);
2297
2298 if (ehc->i.dev) {
2299 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
2300 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2301 ehc->i.err_mask, link->sactive, ehc->i.serror,
2302 ehc->i.action, frozen, tries_buf);
2303 if (desc)
2304 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
2305 } else {
2306 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
2307 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2308 ehc->i.err_mask, link->sactive, ehc->i.serror,
2309 ehc->i.action, frozen, tries_buf);
2310 if (desc)
2311 ata_link_printk(link, KERN_ERR, "%s\n", desc);
2312 }
2313
2314 #ifdef CONFIG_ATA_VERBOSE_ERROR
2315 if (ehc->i.serror)
2316 ata_link_printk(link, KERN_ERR,
2317 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2318 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2319 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2320 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2321 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2322 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2323 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2324 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2325 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2326 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2327 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2328 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2329 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2330 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2331 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2332 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2333 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2334 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2335 #endif
2336
2337 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2338 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2339 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2340 const u8 *cdb = qc->cdb;
2341 char data_buf[20] = "";
2342 char cdb_buf[70] = "";
2343
2344 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2345 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2346 continue;
2347
2348 if (qc->dma_dir != DMA_NONE) {
2349 static const char *dma_str[] = {
2350 [DMA_BIDIRECTIONAL] = "bidi",
2351 [DMA_TO_DEVICE] = "out",
2352 [DMA_FROM_DEVICE] = "in",
2353 };
2354 static const char *prot_str[] = {
2355 [ATA_PROT_PIO] = "pio",
2356 [ATA_PROT_DMA] = "dma",
2357 [ATA_PROT_NCQ] = "ncq",
2358 [ATAPI_PROT_PIO] = "pio",
2359 [ATAPI_PROT_DMA] = "dma",
2360 };
2361
2362 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2363 prot_str[qc->tf.protocol], qc->nbytes,
2364 dma_str[qc->dma_dir]);
2365 }
2366
2367 if (ata_is_atapi(qc->tf.protocol)) {
2368 if (qc->scsicmd)
2369 scsi_print_command(qc->scsicmd);
2370 else
2371 snprintf(cdb_buf, sizeof(cdb_buf),
2372 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
2373 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
2374 cdb[0], cdb[1], cdb[2], cdb[3],
2375 cdb[4], cdb[5], cdb[6], cdb[7],
2376 cdb[8], cdb[9], cdb[10], cdb[11],
2377 cdb[12], cdb[13], cdb[14], cdb[15]);
2378 } else {
2379 const char *descr = ata_get_cmd_descript(cmd->command);
2380 if (descr)
2381 ata_dev_printk(qc->dev, KERN_ERR,
2382 "failed command: %s\n", descr);
2383 }
2384
2385 ata_dev_printk(qc->dev, KERN_ERR,
2386 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2387 "tag %d%s\n %s"
2388 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2389 "Emask 0x%x (%s)%s\n",
2390 cmd->command, cmd->feature, cmd->nsect,
2391 cmd->lbal, cmd->lbam, cmd->lbah,
2392 cmd->hob_feature, cmd->hob_nsect,
2393 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2394 cmd->device, qc->tag, data_buf, cdb_buf,
2395 res->command, res->feature, res->nsect,
2396 res->lbal, res->lbam, res->lbah,
2397 res->hob_feature, res->hob_nsect,
2398 res->hob_lbal, res->hob_lbam, res->hob_lbah,
2399 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2400 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2401
2402 #ifdef CONFIG_ATA_VERBOSE_ERROR
2403 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2404 ATA_ERR)) {
2405 if (res->command & ATA_BUSY)
2406 ata_dev_printk(qc->dev, KERN_ERR,
2407 "status: { Busy }\n");
2408 else
2409 ata_dev_printk(qc->dev, KERN_ERR,
2410 "status: { %s%s%s%s}\n",
2411 res->command & ATA_DRDY ? "DRDY " : "",
2412 res->command & ATA_DF ? "DF " : "",
2413 res->command & ATA_DRQ ? "DRQ " : "",
2414 res->command & ATA_ERR ? "ERR " : "");
2415 }
2416
2417 if (cmd->command != ATA_CMD_PACKET &&
2418 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2419 ATA_ABORTED)))
2420 ata_dev_printk(qc->dev, KERN_ERR,
2421 "error: { %s%s%s%s}\n",
2422 res->feature & ATA_ICRC ? "ICRC " : "",
2423 res->feature & ATA_UNC ? "UNC " : "",
2424 res->feature & ATA_IDNF ? "IDNF " : "",
2425 res->feature & ATA_ABORTED ? "ABRT " : "");
2426 #endif
2427 }
2428 }
2429
2430 /**
2431 * ata_eh_report - report error handling to user
2432 * @ap: ATA port to report EH about
2433 *
2434 * Report EH to user.
2435 *
2436 * LOCKING:
2437 * None.
2438 */
2439 void ata_eh_report(struct ata_port *ap)
2440 {
2441 struct ata_link *link;
2442
2443 ata_for_each_link(link, ap, HOST_FIRST)
2444 ata_eh_link_report(link);
2445 }
2446
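/* Run @reset on @link, optionally invalidating the cached classification
 * first; starting from ATA_DEV_UNKNOWN keeps stale classes from a
 * previous try from leaking into this one.
 */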
2447 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2448 unsigned int *classes, unsigned long deadline,
2449 bool clear_classes)
2450 {
2451 struct ata_device *dev;
2452
2453 if (clear_classes)
2454 ata_for_each_dev(dev, link, ALL)
2455 classes[dev->devno] = ATA_DEV_UNKNOWN;
2456
2457 return reset(link, classes, deadline);
2458 }
2459
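/* Decide whether a hardreset needs to be followed up with a softreset:
 * the reset method can request one explicitly by returning -EAGAIN, and
 * a PMP-capable host link needs SRST for port multiplier detection.
 * Offline links and links flagged ATA_LFLAG_NO_SRST never get one.
 */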
2460 static int ata_eh_followup_srst_needed(struct ata_link *link,
2461 int rc, const unsigned int *classes)
2462 {
2463 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2464 return 0;
2465 if (rc == -EAGAIN)
2466 return 1;
2467 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2468 return 1;
2469 return 0;
2470 }
2471
2472 int ata_eh_reset(struct ata_link *link, int classify,
2473 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2474 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2475 {
2476 struct ata_port *ap = link->ap;
2477 struct ata_link *slave = ap->slave_link;
2478 struct ata_eh_context *ehc = &link->eh_context;
2479 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2480 unsigned int *classes = ehc->classes;
2481 unsigned int lflags = link->flags;
2482 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2483 int max_tries = 0, try = 0;
2484 struct ata_link *failed_link;
2485 struct ata_device *dev;
2486 unsigned long deadline, now;
2487 ata_reset_fn_t reset;
2488 unsigned long flags;
2489 u32 sstatus;
2490 int nr_unknown, rc;
2491
2492 /*
2493 * Prepare to reset
2494 */
2495 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2496 max_tries++;
2497 if (link->flags & ATA_LFLAG_NO_HRST)
2498 hardreset = NULL;
2499 if (link->flags & ATA_LFLAG_NO_SRST)
2500 softreset = NULL;
2501
2502 /* make sure reset attempts are at least COOL_DOWN apart */
2503 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2504 now = jiffies;
2505 WARN_ON(time_after(ehc->last_reset, now));
2506 deadline = ata_deadline(ehc->last_reset,
2507 ATA_EH_RESET_COOL_DOWN);
2508 if (time_before(now, deadline))
2509 schedule_timeout_uninterruptible(deadline - now);
2510 }
2511
2512 spin_lock_irqsave(ap->lock, flags);
2513 ap->pflags |= ATA_PFLAG_RESETTING;
2514 spin_unlock_irqrestore(ap->lock, flags);
2515
2516 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2517
2518 ata_for_each_dev(dev, link, ALL) {
2519 /* If we issue an SRST then an ATA drive (not ATAPI)
2520 * may change configuration and be in PIO0 timing. If
2521 * we do a hard reset (or are coming from power on)
2522 * this is true for ATA or ATAPI. Until we've set a
2523 * suitable controller mode we should not touch the
2524 * bus as we may be talking too fast.
2525 */
2526 dev->pio_mode = XFER_PIO_0;
2527
2528 /* If the controller has a pio mode setup function
2529 * then use it to set the chipset to rights. Don't
2530 * touch the DMA setup as that will be dealt with when
2531 * configuring devices.
2532 */
2533 if (ap->ops->set_piomode)
2534 ap->ops->set_piomode(ap, dev);
2535 }
2536
2537 /* prefer hardreset */
2538 reset = NULL;
2539 ehc->i.action &= ~ATA_EH_RESET;
2540 if (hardreset) {
2541 reset = hardreset;
2542 ehc->i.action |= ATA_EH_HARDRESET;
2543 } else if (softreset) {
2544 reset = softreset;
2545 ehc->i.action |= ATA_EH_SOFTRESET;
2546 }
2547
2548 if (prereset) {
2549 unsigned long deadline = ata_deadline(jiffies,
2550 ATA_EH_PRERESET_TIMEOUT);
2551
2552 if (slave) {
2553 sehc->i.action &= ~ATA_EH_RESET;
2554 sehc->i.action |= ehc->i.action;
2555 }
2556
2557 rc = prereset(link, deadline);
2558
2559 /* If present, do prereset on slave link too. Reset
2560 * is skipped iff both master and slave links report
2561 * -ENOENT or clear ATA_EH_RESET.
2562 */
2563 if (slave && (rc == 0 || rc == -ENOENT)) {
2564 int tmp;
2565
2566 tmp = prereset(slave, deadline);
2567 if (tmp != -ENOENT)
2568 rc = tmp;
2569
2570 ehc->i.action |= sehc->i.action;
2571 }
2572
2573 if (rc) {
2574 if (rc == -ENOENT) {
2575 ata_link_printk(link, KERN_DEBUG,
2576 "port disabled. ignoring.\n");
2577 ehc->i.action &= ~ATA_EH_RESET;
2578
2579 ata_for_each_dev(dev, link, ALL)
2580 classes[dev->devno] = ATA_DEV_NONE;
2581
2582 rc = 0;
2583 } else
2584 ata_link_printk(link, KERN_ERR,
2585 "prereset failed (errno=%d)\n", rc);
2586 goto out;
2587 }
2588
2589 /* prereset() might have cleared ATA_EH_RESET. If so,
2590 * bang classes, thaw and return.
2591 */
2592 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2593 ata_for_each_dev(dev, link, ALL)
2594 classes[dev->devno] = ATA_DEV_NONE;
2595 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2596 ata_is_host_link(link))
2597 ata_eh_thaw_port(ap);
2598 rc = 0;
2599 goto out;
2600 }
2601 }
2602
2603 retry:
2604 /*
2605 * Perform reset
2606 */
2607 if (ata_is_host_link(link))
2608 ata_eh_freeze_port(ap);
2609
2610 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2611
2612 if (reset) {
2613 if (verbose)
2614 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2615 reset == softreset ? "soft" : "hard");
2616
2617 /* mark that this EH session started with reset */
2618 ehc->last_reset = jiffies;
2619 if (reset == hardreset)
2620 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2621 else
2622 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2623
2624 rc = ata_do_reset(link, reset, classes, deadline, true);
2625 if (rc && rc != -EAGAIN) {
2626 failed_link = link;
2627 goto fail;
2628 }
2629
2630 /* hardreset slave link if present */
2631 if (slave && reset == hardreset) {
2632 int tmp;
2633
2634 if (verbose)
2635 ata_link_printk(slave, KERN_INFO,
2636 "hard resetting link\n");
2637
2638 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2639 tmp = ata_do_reset(slave, reset, classes, deadline,
2640 false);
2641 switch (tmp) {
2642 case -EAGAIN:
2643 rc = -EAGAIN;
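				/* fall through */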
2644 case 0:
2645 break;
2646 default:
2647 failed_link = slave;
2648 rc = tmp;
2649 goto fail;
2650 }
2651 }
2652
2653 /* perform follow-up SRST if necessary */
2654 if (reset == hardreset &&
2655 ata_eh_followup_srst_needed(link, rc, classes)) {
2656 reset = softreset;
2657
2658 if (!reset) {
2659 ata_link_printk(link, KERN_ERR,
2660 "follow-up softreset required "
2661 "but no softreset avaliable\n");
2662 failed_link = link;
2663 rc = -EINVAL;
2664 goto fail;
2665 }
2666
2667 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2668 rc = ata_do_reset(link, reset, classes, deadline, true);
2669 if (rc) {
2670 failed_link = link;
2671 goto fail;
2672 }
2673 }
2674 } else {
2675 if (verbose)
2676 ata_link_printk(link, KERN_INFO, "no reset method "
2677 "available, skipping reset\n");
2678 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2679 lflags |= ATA_LFLAG_ASSUME_ATA;
2680 }
2681
2682 /*
2683 * Post-reset processing
2684 */
2685 ata_for_each_dev(dev, link, ALL) {
2686 /* After the reset, the device state is PIO 0 and the
2687 * controller state is undefined. Reset also wakes up
2688 * drives from sleeping mode.
2689 */
2690 dev->pio_mode = XFER_PIO_0;
2691 dev->flags &= ~ATA_DFLAG_SLEEPING;
2692
2693 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2694 continue;
2695
2696 /* apply class override */
2697 if (lflags & ATA_LFLAG_ASSUME_ATA)
2698 classes[dev->devno] = ATA_DEV_ATA;
2699 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2700 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2701 }
2702
2703 /* record current link speed */
2704 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2705 link->sata_spd = (sstatus >> 4) & 0xf;
2706 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2707 slave->sata_spd = (sstatus >> 4) & 0xf;
2708
2709 /* thaw the port */
2710 if (ata_is_host_link(link))
2711 ata_eh_thaw_port(ap);
2712
2713 /* postreset() should clear hardware SError. Although SError
2714 * is cleared during link resume, clearing SError here is
2715 * necessary as some PHYs raise hotplug events after SRST.
2716 * This introduces a race condition where hotplug can occur
2717 * between reset and here; the race is mitigated by cross-checking
2718 * link onlineness and the classification result later.
2719 */
2720 if (postreset) {
2721 postreset(link, classes);
2722 if (slave)
2723 postreset(slave, classes);
2724 }
2725
2726 /*
2727 * Some controllers can't be frozen very well and may set
2728 * spurious error conditions during reset. Clear accumulated
2729 * error information. As reset is the final recovery action,
2730 * nothing is lost by doing this.
2731 */
2732 spin_lock_irqsave(link->ap->lock, flags);
2733 memset(&link->eh_info, 0, sizeof(link->eh_info));
2734 if (slave)
2735 memset(&slave->eh_info, 0, sizeof(slave->eh_info));
2736 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2737 spin_unlock_irqrestore(link->ap->lock, flags);
2738
2739 /*
2740 * Make sure onlineness and classification result correspond.
2741 * Hotplug could have happened during reset and some
2742 * controllers fail to wait while a drive is spinning up after
2743 * being hotplugged, causing misdetection. By cross-checking
2744 * link on/offlineness and classification result, those
2745 * conditions can be reliably detected and retried.
2746 */
2747 nr_unknown = 0;
2748 ata_for_each_dev(dev, link, ALL) {
2749 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2750 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2751 ata_dev_printk(dev, KERN_DEBUG, "link online "
2752 "but device misclassifed\n");
2753 classes[dev->devno] = ATA_DEV_NONE;
2754 nr_unknown++;
2755 }
2756 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2757 if (ata_class_enabled(classes[dev->devno]))
2758 ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2759 "clearing class %d to NONE\n",
2760 classes[dev->devno]);
2761 classes[dev->devno] = ATA_DEV_NONE;
2762 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2763 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2764 "clearing UNKNOWN to NONE\n");
2765 classes[dev->devno] = ATA_DEV_NONE;
2766 }
2767 }
2768
2769 if (classify && nr_unknown) {
2770 if (try < max_tries) {
2771 ata_link_printk(link, KERN_WARNING, "link online but "
2772 "%d devices misclassified, retrying\n",
2773 nr_unknown);
2774 failed_link = link;
2775 rc = -EAGAIN;
2776 goto fail;
2777 }
2778 ata_link_printk(link, KERN_WARNING,
2779 "link online but %d devices misclassified, "
2780 "device detection might fail\n", nr_unknown);
2781 }
2782
2783 /* reset successful, schedule revalidation */
2784 ata_eh_done(link, NULL, ATA_EH_RESET);
2785 if (slave)
2786 ata_eh_done(slave, NULL, ATA_EH_RESET);
2787 ehc->last_reset = jiffies; /* update to completion time */
2788 ehc->i.action |= ATA_EH_REVALIDATE;
2789
2790 rc = 0;
2791 out:
2792 /* clear hotplug flag */
2793 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2794 if (slave)
2795 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2796
2797 spin_lock_irqsave(ap->lock, flags);
2798 ap->pflags &= ~ATA_PFLAG_RESETTING;
2799 spin_unlock_irqrestore(ap->lock, flags);
2800
2801 return rc;
2802
2803 fail:
2804 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2805 if (!ata_is_host_link(link) &&
2806 sata_scr_read(link, SCR_STATUS, &sstatus))
2807 rc = -ERESTART;
2808
2809 if (rc == -ERESTART || try >= max_tries)
2810 goto out;
2811
2812 now = jiffies;
2813 if (time_before(now, deadline)) {
2814 unsigned long delta = deadline - now;
2815
2816 ata_link_printk(failed_link, KERN_WARNING,
2817 "reset failed (errno=%d), retrying in %u secs\n",
2818 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2819
2820 while (delta)
2821 delta = schedule_timeout_uninterruptible(delta);
2822 }
2823
2824 if (try == max_tries - 1) {
2825 sata_down_spd_limit(link, 0);
2826 if (slave)
2827 sata_down_spd_limit(slave, 0);
2828 } else if (rc == -EPIPE)
2829 sata_down_spd_limit(failed_link, 0);
2830
2831 if (hardreset)
2832 reset = hardreset;
2833 goto retry;
2834 }
2835
2836 static inline void ata_eh_pull_park_action(struct ata_port *ap)
2837 {
2838 struct ata_link *link;
2839 struct ata_device *dev;
2840 unsigned long flags;
2841
2842 /*
2843 * This function can be thought of as an extended version of
2844 * ata_eh_about_to_do() specially crafted to accommodate the
2845 * requirements of ATA_EH_PARK handling. Since the EH thread
2846 * does not leave the do {} while () loop in ata_eh_recover as
2847 * long as the timeout for a park request to *one* device on
2848 * the port has not expired, and since we still want to pick
2849 * up park requests to other devices on the same port or
2850 * timeout updates for the same device, we have to pull
2851 * ATA_EH_PARK actions from eh_info into eh_context.i
2852 * ourselves at the beginning of each pass over the loop.
2853 *
2854 * Additionally, all write accesses to &ap->park_req_pending
2855 * through INIT_COMPLETION() (see below) or complete_all()
2856 * (see ata_scsi_park_store()) are protected by the host lock.
2857 * As a result, park_req_pending.done is zero on exit from
2858 * this function, i.e. when ATA_EH_PARK actions for
2859 * *all* devices on port ap have been pulled into the
2860 * respective eh_context structs. If, and only if,
2861 * park_req_pending.done is non-zero by the time we reach
2862 * wait_for_completion_timeout(), another ATA_EH_PARK action
2863 * has been scheduled for at least one of the devices on port
2864 * ap and we have to cycle over the do {} while () loop in
2865 * ata_eh_recover() again.
2866 */
2867
2868 spin_lock_irqsave(ap->lock, flags);
2869 INIT_COMPLETION(ap->park_req_pending);
2870 ata_for_each_link(link, ap, EDGE) {
2871 ata_for_each_dev(dev, link, ALL) {
2872 struct ata_eh_info *ehi = &link->eh_info;
2873
2874 link->eh_context.i.dev_action[dev->devno] |=
2875 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2876 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2877 }
2878 }
2879 spin_unlock_irqrestore(ap->lock, flags);
2880 }
2881
2882 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2883 {
2884 struct ata_eh_context *ehc = &dev->link->eh_context;
2885 struct ata_taskfile tf;
2886 unsigned int err_mask;
2887
2888 ata_tf_init(dev, &tf);
2889 if (park) {
2890 ehc->unloaded_mask |= 1 << dev->devno;
2891 tf.command = ATA_CMD_IDLEIMMEDIATE;
2892 tf.feature = 0x44;
2893 tf.lbal = 0x4c;
2894 tf.lbam = 0x4e;
2895 tf.lbah = 0x55;
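		/* FEATURES 0x44 with LBA 0x554e4c selects the UNLOAD
		 * FEATURE of IDLE IMMEDIATE (ATA-8); the device acks a
		 * successful unload with 0xc4 in LBA low, which is
		 * checked below.
		 */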
2896 } else {
2897 ehc->unloaded_mask &= ~(1 << dev->devno);
2898 tf.command = ATA_CMD_CHK_POWER;
2899 }
2900
2901 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2902 tf.protocol |= ATA_PROT_NODATA;
2903 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2904 if (park && (err_mask || tf.lbal != 0xc4)) {
2905 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2906 ehc->unloaded_mask &= ~(1 << dev->devno);
2907 }
2908 }
2909
2910 static int ata_eh_revalidate_and_attach(struct ata_link *link,
2911 struct ata_device **r_failed_dev)
2912 {
2913 struct ata_port *ap = link->ap;
2914 struct ata_eh_context *ehc = &link->eh_context;
2915 struct ata_device *dev;
2916 unsigned int new_mask = 0;
2917 unsigned long flags;
2918 int rc = 0;
2919
2920 DPRINTK("ENTER\n");
2921
2922 /* For PATA drive side cable detection to work, IDENTIFY must
2923 * be done backwards such that PDIAG- is released by the slave
2924 * device before the master device is identified.
2925 */
2926 ata_for_each_dev(dev, link, ALL_REVERSE) {
2927 unsigned int action = ata_eh_dev_action(dev);
2928 unsigned int readid_flags = 0;
2929
2930 if (ehc->i.flags & ATA_EHI_DID_RESET)
2931 readid_flags |= ATA_READID_POSTRESET;
2932
2933 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2934 WARN_ON(dev->class == ATA_DEV_PMP);
2935
2936 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2937 rc = -EIO;
2938 goto err;
2939 }
2940
2941 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2942 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2943 readid_flags);
2944 if (rc)
2945 goto err;
2946
2947 ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2948
2949 /* Configuration may have changed, reconfigure
2950 * transfer mode.
2951 */
2952 ehc->i.flags |= ATA_EHI_SETMODE;
2953
2954 /* schedule the scsi_rescan_device() here */
2955 schedule_work(&(ap->scsi_rescan_task));
2956 } else if (dev->class == ATA_DEV_UNKNOWN &&
2957 ehc->tries[dev->devno] &&
2958 ata_class_enabled(ehc->classes[dev->devno])) {
2959 /* Temporarily set dev->class, it will be
2960 * permanently set once all configurations are
2961 * complete. This is necessary because new
2962 * device configuration is done in two
2963 * separate loops.
2964 */
2965 dev->class = ehc->classes[dev->devno];
2966
2967 if (dev->class == ATA_DEV_PMP)
2968 rc = sata_pmp_attach(dev);
2969 else
2970 rc = ata_dev_read_id(dev, &dev->class,
2971 readid_flags, dev->id);
2972
2973 /* read_id might have changed class, store and reset */
2974 ehc->classes[dev->devno] = dev->class;
2975 dev->class = ATA_DEV_UNKNOWN;
2976
2977 switch (rc) {
2978 case 0:
2979 /* clear error info accumulated during probe */
2980 ata_ering_clear(&dev->ering);
2981 new_mask |= 1 << dev->devno;
2982 break;
2983 case -ENOENT:
2984 /* IDENTIFY was issued to non-existent
2985 * device. No need to reset. Just
2986 * thaw and ignore the device.
2987 */
2988 ata_eh_thaw_port(ap);
2989 break;
2990 default:
2991 goto err;
2992 }
2993 }
2994 }
2995
2996 /* PDIAG- should have been released, ask cable type if post-reset */
2997 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
2998 if (ap->ops->cable_detect)
2999 ap->cbl = ap->ops->cable_detect(ap);
3000 ata_force_cbl(ap);
3001 }
3002
3003 /* Configure new devices forward so that the user doesn't see
3004 * device detection messages out of order.
3005 */
3006 ata_for_each_dev(dev, link, ALL) {
3007 if (!(new_mask & (1 << dev->devno)))
3008 continue;
3009
3010 dev->class = ehc->classes[dev->devno];
3011
3012 if (dev->class == ATA_DEV_PMP)
3013 continue;
3014
3015 ehc->i.flags |= ATA_EHI_PRINTINFO;
3016 rc = ata_dev_configure(dev);
3017 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3018 if (rc) {
3019 dev->class = ATA_DEV_UNKNOWN;
3020 goto err;
3021 }
3022
3023 spin_lock_irqsave(ap->lock, flags);
3024 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3025 spin_unlock_irqrestore(ap->lock, flags);
3026
3027 /* new device discovered, configure xfermode */
3028 ehc->i.flags |= ATA_EHI_SETMODE;
3029 }
3030
3031 return 0;
3032
3033 err:
3034 *r_failed_dev = dev;
3035 DPRINTK("EXIT rc=%d\n", rc);
3036 return rc;
3037 }
3038
3039 /**
3040 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3041 * @link: link on which timings will be programmed
3042 * @r_failed_dev: out parameter for failed device
3043 *
3044 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3045 * ata_set_mode() fails, pointer to the failing device is
3046 * returned in @r_failed_dev.
3047 *
3048 * LOCKING:
3049 * PCI/etc. bus probe sem.
3050 *
3051 * RETURNS:
3052 * 0 on success, negative errno otherwise
3053 */
3054 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3055 {
3056 struct ata_port *ap = link->ap;
3057 struct ata_device *dev;
3058 int rc;
3059
3060 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3061 ata_for_each_dev(dev, link, ENABLED) {
3062 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3063 struct ata_ering_entry *ent;
3064
3065 ent = ata_ering_top(&dev->ering);
3066 if (ent)
3067 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3068 }
3069 }
3070
3071 /* has private set_mode? */
3072 if (ap->ops->set_mode)
3073 rc = ap->ops->set_mode(link, r_failed_dev);
3074 else
3075 rc = ata_do_set_mode(link, r_failed_dev);
3076
3077 /* if transfer mode has changed, set DUBIOUS_XFER on device */
3078 ata_for_each_dev(dev, link, ENABLED) {
3079 struct ata_eh_context *ehc = &link->eh_context;
3080 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3081 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3082
3083 if (dev->xfer_mode != saved_xfer_mode ||
3084 ata_ncq_enabled(dev) != saved_ncq)
3085 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3086 }
3087
3088 return rc;
3089 }
3090
3091 /**
3092 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3093 * @dev: ATAPI device to clear UA for
3094 *
3095 * Resets and other operations can make an ATAPI device raise
3096 * UNIT ATTENTION which causes the next operation to fail. This
3097 * function clears UA.
3098 *
3099 * LOCKING:
3100 * EH context (may sleep).
3101 *
3102 * RETURNS:
3103 * 0 on success, -errno on failure.
3104 */
3105 static int atapi_eh_clear_ua(struct ata_device *dev)
3106 {
3107 int i;
3108
3109 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3110 u8 *sense_buffer = dev->link->ap->sector_buf;
3111 u8 sense_key = 0;
3112 unsigned int err_mask;
3113
3114 err_mask = atapi_eh_tur(dev, &sense_key);
3115 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3116 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3117 "failed (err_mask=0x%x)\n", err_mask);
3118 return -EIO;
3119 }
3120
3121 if (!err_mask || sense_key != UNIT_ATTENTION)
3122 return 0;
3123
3124 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3125 if (err_mask) {
3126 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3127 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3128 return -EIO;
3129 }
3130 }
3131
3132 ata_dev_printk(dev, KERN_WARNING,
3133 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3134
3135 return 0;
3136 }
3137
3138 /**
3139 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3140 * @dev: ATA device which may need FLUSH retry
3141 *
3142 * If @dev failed FLUSH, it needs to be reported to the upper
3143 * layer immediately as it means that @dev failed to remap and
3144 * has already lost at least a sector; further FLUSH retries
3145 * won't make any difference to the lost sector. However, if
3146 * FLUSH failed for some other reason, for example a
3147 * transmission error, FLUSH needs to be retried.
3148 *
3149 * This function determines whether FLUSH failure retry is
3150 * necessary and performs it if so.
3151 *
3152 * RETURNS:
3153 * 0 if EH can continue, -errno if EH needs to be repeated.
3154 */
3155 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3156 {
3157 struct ata_link *link = dev->link;
3158 struct ata_port *ap = link->ap;
3159 struct ata_queued_cmd *qc;
3160 struct ata_taskfile tf;
3161 unsigned int err_mask;
3162 int rc = 0;
3163
3164 /* did flush fail for this device? */
3165 if (!ata_tag_valid(link->active_tag))
3166 return 0;
3167
3168 qc = __ata_qc_from_tag(ap, link->active_tag);
3169 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3170 qc->tf.command != ATA_CMD_FLUSH))
3171 return 0;
3172
3173 /* if the device failed it, it should be reported to upper layers */
3174 if (qc->err_mask & AC_ERR_DEV)
3175 return 0;
3176
3177 /* flush failed for some other reason, give it another shot */
3178 ata_tf_init(dev, &tf);
3179
3180 tf.command = qc->tf.command;
3181 tf.flags |= ATA_TFLAG_DEVICE;
3182 tf.protocol = ATA_PROT_NODATA;
3183
3184 ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
3185 tf.command, qc->err_mask);
3186
3187 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3188 if (!err_mask) {
3189 /*
3190 * FLUSH is complete but there's no way to
3191 * successfully complete a failed command from EH.
3192 * Making sure retry is allowed at least once and
3193 * retrying it should do the trick - whatever was in
3194 * the cache is already on the platter and this won't
3195 * cause infinite loop.
3196 */
3197 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3198 } else {
3199 ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
3200 err_mask);
3201 rc = -EIO;
3202
3203 /* if device failed it, report it to upper layers */
3204 if (err_mask & AC_ERR_DEV) {
3205 qc->err_mask |= AC_ERR_DEV;
3206 qc->result_tf = tf;
3207 if (!(ap->pflags & ATA_PFLAG_FROZEN))
3208 rc = 0;
3209 }
3210 }
3211 return rc;
3212 }
3213
3214 static int ata_link_nr_enabled(struct ata_link *link)
3215 {
3216 struct ata_device *dev;
3217 int cnt = 0;
3218
3219 ata_for_each_dev(dev, link, ENABLED)
3220 cnt++;
3221 return cnt;
3222 }
3223
3224 static int ata_link_nr_vacant(struct ata_link *link)
3225 {
3226 struct ata_device *dev;
3227 int cnt = 0;
3228
3229 ata_for_each_dev(dev, link, ALL)
3230 if (dev->class == ATA_DEV_UNKNOWN)
3231 cnt++;
3232 return cnt;
3233 }
3234
3235 static int ata_eh_skip_recovery(struct ata_link *link)
3236 {
3237 struct ata_port *ap = link->ap;
3238 struct ata_eh_context *ehc = &link->eh_context;
3239 struct ata_device *dev;
3240
3241 /* skip disabled links */
3242 if (link->flags & ATA_LFLAG_DISABLED)
3243 return 1;
3244
3245 /* skip if explicitly requested */
3246 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3247 return 1;
3248
3249 /* thaw frozen port and recover failed devices */
3250 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3251 return 0;
3252
3253 /* reset at least once if reset is requested */
3254 if ((ehc->i.action & ATA_EH_RESET) &&
3255 !(ehc->i.flags & ATA_EHI_DID_RESET))
3256 return 0;
3257
3258 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
3259 ata_for_each_dev(dev, link, ALL) {
3260 if (dev->class == ATA_DEV_UNKNOWN &&
3261 ehc->classes[dev->devno] != ATA_DEV_NONE)
3262 return 0;
3263 }
3264
3265 return 1;
3266 }
3267
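/* ata_ering_map() callback: entries are visited starting from the most
 * recent, so returning -1 at the first entry older than the probe trial
 * interval stops the walk; every newer entry bumps *trials.
 */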
3268 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3269 {
3270 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3271 u64 now = get_jiffies_64();
3272 int *trials = void_arg;
3273
3274 if (ent->timestamp < now - min(now, interval))
3275 return -1;
3276
3277 (*trials)++;
3278 return 0;
3279 }
3280
3281 static int ata_eh_schedule_probe(struct ata_device *dev)
3282 {
3283 struct ata_eh_context *ehc = &dev->link->eh_context;
3284 struct ata_link *link = ata_dev_phys_link(dev);
3285 int trials = 0;
3286
3287 if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3288 (ehc->did_probe_mask & (1 << dev->devno)))
3289 return 0;
3290
3291 ata_eh_detach_dev(dev);
3292 ata_dev_init(dev);
3293 ehc->did_probe_mask |= (1 << dev->devno);
3294 ehc->i.action |= ATA_EH_RESET;
3295 ehc->saved_xfer_mode[dev->devno] = 0;
3296 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3297
3298 /* Record and count probe trials on the ering. The specific
3299 * error mask used is irrelevant. Because a successful device
3300 * detection clears the ering, this count accumulates only if
3301 * there are consecutive failed probes.
3302 *
3303 * If the count exceeds ATA_EH_PROBE_TRIALS within the last
3304 * ATA_EH_PROBE_TRIAL_INTERVAL (matching the check below), link
3305 * speed is forced down to 1.5Gbps.
3306 *
3307 * This is to work around cases where failed link speed
3308 * negotiation results in device misdetection leading to
3309 * infinite DEVXCHG or PHRDY CHG events.
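	 *
	 * E.g. with ATA_EH_PROBE_TRIALS == 2, the third failed probe
	 * within the interval makes trials 3 (> 2) and the link speed
	 * is clamped to 1.5Gbps below.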
3310 */
3311 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3312 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3313
3314 if (trials > ATA_EH_PROBE_TRIALS)
3315 sata_down_spd_limit(link, 1);
3316
3317 return 1;
3318 }
3319
3320 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3321 {
3322 struct ata_eh_context *ehc = &dev->link->eh_context;
3323
3324 /* -EAGAIN from EH routine indicates retry without prejudice.
3325 * The requester is responsible for ensuring forward progress.
3326 */
3327 if (err != -EAGAIN)
3328 ehc->tries[dev->devno]--;
3329
3330 switch (err) {
3331 case -ENODEV:
3332 /* device missing or wrong IDENTIFY data, schedule probing */
3333 ehc->i.probe_mask |= (1 << dev->devno);
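		/* fall through */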
3334 case -EINVAL:
3335 /* give it just one more chance */
3336 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
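		/* fall through */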
3337 case -EIO:
3338 if (ehc->tries[dev->devno] == 1) {
3339 /* This is the last chance, better to slow
3340 * down than lose it.
3341 */
3342 sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3343 if (dev->pio_mode > XFER_PIO_0)
3344 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3345 }
3346 }
3347
3348 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3349 /* disable device if it has used up all its chances */
3350 ata_dev_disable(dev);
3351
3352 /* detach if offline */
3353 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3354 ata_eh_detach_dev(dev);
3355
3356 /* schedule probe if necessary */
3357 if (ata_eh_schedule_probe(dev)) {
3358 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3359 memset(ehc->cmd_timeout_idx[dev->devno], 0,
3360 sizeof(ehc->cmd_timeout_idx[dev->devno]));
3361 }
3362
3363 return 1;
3364 } else {
3365 ehc->i.action |= ATA_EH_RESET;
3366 return 0;
3367 }
3368 }
3369
3370 /**
3371 * ata_eh_recover - recover host port after error
3372 * @ap: host port to recover
3373 * @prereset: prereset method (can be NULL)
3374 * @softreset: softreset method (can be NULL)
3375 * @hardreset: hardreset method (can be NULL)
3376 * @postreset: postreset method (can be NULL)
3377 * @r_failed_link: out parameter for failed link
3378 *
3379 * This is the alpha and omega, eum and yang, heart and soul of
3380 * libata exception handling. On entry, actions required to
3381 * recover each link and hotplug requests are recorded in the
3382 * link's eh_context. This function executes all the operations
3383 * with appropriate retries and fallbacks to resurrect failed
3384 * devices, detach goners and greet newcomers.
3385 *
3386 * LOCKING:
3387 * Kernel thread context (may sleep).
3388 *
3389 * RETURNS:
3390 * 0 on success, -errno on failure.
3391 */
3392 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3393 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3394 ata_postreset_fn_t postreset,
3395 struct ata_link **r_failed_link)
3396 {
3397 struct ata_link *link;
3398 struct ata_device *dev;
3399 int nr_failed_devs;
3400 int rc;
3401 unsigned long flags, deadline;
3402
3403 DPRINTK("ENTER\n");
3404
3405 /* prep for recovery */
3406 ata_for_each_link(link, ap, EDGE) {
3407 struct ata_eh_context *ehc = &link->eh_context;
3408
3409 /* re-enable link? */
3410 if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3411 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3412 spin_lock_irqsave(ap->lock, flags);
3413 link->flags &= ~ATA_LFLAG_DISABLED;
3414 spin_unlock_irqrestore(ap->lock, flags);
3415 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3416 }
3417
3418 ata_for_each_dev(dev, link, ALL) {
3419 if (link->flags & ATA_LFLAG_NO_RETRY)
3420 ehc->tries[dev->devno] = 1;
3421 else
3422 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3423
3424 /* collect port action mask recorded in dev actions */
3425 ehc->i.action |= ehc->i.dev_action[dev->devno] &
3426 ~ATA_EH_PERDEV_MASK;
3427 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3428
3429 /* process hotplug request */
3430 if (dev->flags & ATA_DFLAG_DETACH)
3431 ata_eh_detach_dev(dev);
3432
3433 /* schedule probe if necessary */
3434 if (!ata_dev_enabled(dev))
3435 ata_eh_schedule_probe(dev);
3436 }
3437 }
3438
3439 retry:
3440 rc = 0;
3441 nr_failed_devs = 0;
3442
3443 /* if UNLOADING, finish immediately */
3444 if (ap->pflags & ATA_PFLAG_UNLOADING)
3445 goto out;
3446
3447 /* prep for EH */
3448 ata_for_each_link(link, ap, EDGE) {
3449 struct ata_eh_context *ehc = &link->eh_context;
3450
3451 /* skip EH if possible. */
3452 if (ata_eh_skip_recovery(link))
3453 ehc->i.action = 0;
3454
3455 ata_for_each_dev(dev, link, ALL)
3456 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3457 }
3458
3459 /* reset */
3460 ata_for_each_link(link, ap, EDGE) {
3461 struct ata_eh_context *ehc = &link->eh_context;
3462
3463 if (!(ehc->i.action & ATA_EH_RESET))
3464 continue;
3465
3466 rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3467 prereset, softreset, hardreset, postreset);
3468 if (rc) {
3469 ata_link_printk(link, KERN_ERR,
3470 "reset failed, giving up\n");
3471 goto out;
3472 }
3473 }
3474
3475 do {
3476 unsigned long now;
3477
3478 /*
3479 * clears ATA_EH_PARK in eh_info and resets
3480 * ap->park_req_pending
3481 */
3482 ata_eh_pull_park_action(ap);
3483
3484 deadline = jiffies;
3485 ata_for_each_link(link, ap, EDGE) {
3486 ata_for_each_dev(dev, link, ALL) {
3487 struct ata_eh_context *ehc = &link->eh_context;
3488 unsigned long tmp;
3489
3490 if (dev->class != ATA_DEV_ATA)
3491 continue;
3492 if (!(ehc->i.dev_action[dev->devno] &
3493 ATA_EH_PARK))
3494 continue;
3495 tmp = dev->unpark_deadline;
3496 if (time_before(deadline, tmp))
3497 deadline = tmp;
3498 else if (time_before_eq(tmp, jiffies))
3499 continue;
3500 if (ehc->unloaded_mask & (1 << dev->devno))
3501 continue;
3502
3503 ata_eh_park_issue_cmd(dev, 1);
3504 }
3505 }
3506
3507 now = jiffies;
3508 if (time_before_eq(deadline, now))
3509 break;
3510
3511 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3512 deadline - now);
3513 } while (deadline);
3514 ata_for_each_link(link, ap, EDGE) {
3515 ata_for_each_dev(dev, link, ALL) {
3516 if (!(link->eh_context.unloaded_mask &
3517 (1 << dev->devno)))
3518 continue;
3519
3520 ata_eh_park_issue_cmd(dev, 0);
3521 ata_eh_done(link, dev, ATA_EH_PARK);
3522 }
3523 }
3524
3525 /* the rest */
3526 ata_for_each_link(link, ap, EDGE) {
3527 struct ata_eh_context *ehc = &link->eh_context;
3528
3529 /* revalidate existing devices and attach new ones */
3530 rc = ata_eh_revalidate_and_attach(link, &dev);
3531 if (rc)
3532 goto dev_fail;
3533
3534 /* if a PMP got attached, return; PMP EH will take care of it */
3535 if (link->device->class == ATA_DEV_PMP) {
3536 ehc->i.action = 0;
3537 return 0;
3538 }
3539
3540 /* configure transfer mode if necessary */
3541 if (ehc->i.flags & ATA_EHI_SETMODE) {
3542 rc = ata_set_mode(link, &dev);
3543 if (rc)
3544 goto dev_fail;
3545 ehc->i.flags &= ~ATA_EHI_SETMODE;
3546 }
3547
3548 /* If reset has been issued, clear UA to avoid
3549 * disrupting the current users of the device.
3550 */
3551 if (ehc->i.flags & ATA_EHI_DID_RESET) {
3552 ata_for_each_dev(dev, link, ALL) {
3553 if (dev->class != ATA_DEV_ATAPI)
3554 continue;
3555 rc = atapi_eh_clear_ua(dev);
3556 if (rc)
3557 goto dev_fail;
3558 }
3559 }
3560
3561 /* retry flush if necessary */
3562 ata_for_each_dev(dev, link, ALL) {
3563 if (dev->class != ATA_DEV_ATA)
3564 continue;
3565 rc = ata_eh_maybe_retry_flush(dev);
3566 if (rc)
3567 goto dev_fail;
3568 }
3569
3570 /* configure link power saving */
3571 if (ehc->i.action & ATA_EH_LPM)
3572 ata_for_each_dev(dev, link, ALL)
3573 ata_dev_enable_pm(dev, ap->pm_policy);
3574
3575 /* this link is okay now */
3576 ehc->i.flags = 0;
3577 continue;
3578
3579 dev_fail:
3580 nr_failed_devs++;
3581 ata_eh_handle_dev_fail(dev, rc);
3582
3583 if (ap->pflags & ATA_PFLAG_FROZEN) {
3584 /* PMP reset requires working host port.
3585 * Can't retry if it's frozen.
3586 */
3587 if (sata_pmp_attached(ap))
3588 goto out;
3589 break;
3590 }
3591 }
3592
3593 if (nr_failed_devs)
3594 goto retry;
3595
3596 out:
3597 if (rc && r_failed_link)
3598 *r_failed_link = link;
3599
3600 DPRINTK("EXIT, rc=%d\n", rc);
3601 return rc;
3602 }
3603
3604 /**
3605 * ata_eh_finish - finish up EH
3606 * @ap: host port to finish EH for
3607 *
3608 * Recovery is complete. Clean up EH states and retry or finish
3609 * failed qcs.
3610 *
3611 * LOCKING:
3612 * None.
3613 */
3614 void ata_eh_finish(struct ata_port *ap)
3615 {
3616 int tag;
3617
3618 /* retry or finish qcs */
3619 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3620 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3621
3622 if (!(qc->flags & ATA_QCFLAG_FAILED))
3623 continue;
3624
3625 if (qc->err_mask) {
3626 /* FIXME: Once EH migration is complete,
3627 * generate sense data in this function,
3628 * considering both err_mask and tf.
3629 */
3630 if (qc->flags & ATA_QCFLAG_RETRY)
3631 ata_eh_qc_retry(qc);
3632 else
3633 ata_eh_qc_complete(qc);
3634 } else {
3635 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3636 ata_eh_qc_complete(qc);
3637 } else {
3638 /* feed zero TF to sense generation */
3639 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3640 ata_eh_qc_retry(qc);
3641 }
3642 }
3643 }
3644
3645 /* make sure nr_active_links is zero after EH */
3646 WARN_ON(ap->nr_active_links);
3647 ap->nr_active_links = 0;
3648 }
3649
3650 /**
3651 * ata_do_eh - do standard error handling
3652 * @ap: host port to handle error for
3653 *
3654 * @prereset: prereset method (can be NULL)
3655 * @softreset: softreset method (can be NULL)
3656 * @hardreset: hardreset method (can be NULL)
3657 * @postreset: postreset method (can be NULL)
3658 *
3659 * Perform standard error handling sequence.
3660 *
3661 * LOCKING:
3662 * Kernel thread context (may sleep).
3663 */
3664 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3665 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3666 ata_postreset_fn_t postreset)
3667 {
3668 struct ata_device *dev;
3669 int rc;
3670
3671 ata_eh_autopsy(ap);
3672 ata_eh_report(ap);
3673
3674 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3675 NULL);
3676 if (rc) {
3677 ata_for_each_dev(dev, &ap->link, ALL)
3678 ata_dev_disable(dev);
3679 }
3680
3681 ata_eh_finish(ap);
3682 }
3683
3684 /**
3685 * ata_std_error_handler - standard error handler
3686 * @ap: host port to handle error for
3687 *
3688 * Standard error handler
3689 *
3690 * LOCKING:
3691 * Kernel thread context (may sleep).
3692 */
3693 void ata_std_error_handler(struct ata_port *ap)
3694 {
3695 struct ata_port_operations *ops = ap->ops;
3696 ata_reset_fn_t hardreset = ops->hardreset;
3697
3698 /* ignore built-in hardreset if SCR access is not available */
3699 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
3700 hardreset = NULL;
3701
3702 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3703 }
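
/*
 * Illustrative sketch, not part of the original source: a low-level
 * driver typically reaches this standard EH path by inheriting a base
 * ops table, or by pointing ->error_handler at ata_std_error_handler
 * directly.  The "example" names below are hypothetical.
 */
static struct ata_port_operations example_port_ops __maybe_unused = {
	.inherits	= &sata_port_ops,
	.error_handler	= ata_std_error_handler,
};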
3704
3705 #ifdef CONFIG_PM
3706 /**
3707 * ata_eh_handle_port_suspend - perform port suspend operation
3708 * @ap: port to suspend
3709 *
3710 * Suspend @ap.
3711 *
3712 * LOCKING:
3713 * Kernel thread context (may sleep).
3714 */
3715 static void ata_eh_handle_port_suspend(struct ata_port *ap)
3716 {
3717 unsigned long flags;
3718 int rc = 0;
3719
3720 /* are we suspending? */
3721 spin_lock_irqsave(ap->lock, flags);
3722 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3723 ap->pm_mesg.event == PM_EVENT_ON) {
3724 spin_unlock_irqrestore(ap->lock, flags);
3725 return;
3726 }
3727 spin_unlock_irqrestore(ap->lock, flags);
3728
3729 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3730
3731 /* tell ACPI we're suspending */
3732 rc = ata_acpi_on_suspend(ap);
3733 if (rc)
3734 goto out;
3735
3736 /* suspend */
3737 ata_eh_freeze_port(ap);
3738
3739 if (ap->ops->port_suspend)
3740 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3741
3742 ata_acpi_set_state(ap, PMSG_SUSPEND);
3743 out:
3744 /* report result */
3745 spin_lock_irqsave(ap->lock, flags);
3746
3747 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3748 if (rc == 0)
3749 ap->pflags |= ATA_PFLAG_SUSPENDED;
3750 else if (ap->pflags & ATA_PFLAG_FROZEN)
3751 ata_port_schedule_eh(ap);
3752
3753 if (ap->pm_result) {
3754 *ap->pm_result = rc;
3755 ap->pm_result = NULL;
3756 }
3757
3758 spin_unlock_irqrestore(ap->lock, flags);
3759
3760 return;
3761 }
3762
3763 /**
3764 * ata_eh_handle_port_resume - perform port resume operation
3765 * @ap: port to resume
3766 *
3767 * Resume @ap.
3768 *
3769 * LOCKING:
3770 * Kernel thread context (may sleep).
3771 */
3772 static void ata_eh_handle_port_resume(struct ata_port *ap)
3773 {
3774 struct ata_link *link;
3775 struct ata_device *dev;
3776 unsigned long flags;
3777 int rc = 0;
3778
3779 /* are we resuming? */
3780 spin_lock_irqsave(ap->lock, flags);
3781 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3782 ap->pm_mesg.event != PM_EVENT_ON) {
3783 spin_unlock_irqrestore(ap->lock, flags);
3784 return;
3785 }
3786 spin_unlock_irqrestore(ap->lock, flags);
3787
3788 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3789
3790 /*
3791 * Error timestamps are in jiffies, which doesn't advance while
3792 * suspended, and PHY events during resume aren't too uncommon.
3793 * When the two are combined, it can lead to unnecessary speed
3794 * downs if the machine is suspended and resumed repeatedly.
3795 * Clear error history.
3796 */
3797 ata_for_each_link(link, ap, HOST_FIRST)
3798 ata_for_each_dev(dev, link, ALL)
3799 ata_ering_clear(&dev->ering);
3800
3801 ata_acpi_set_state(ap, PMSG_ON);
3802
3803 if (ap->ops->port_resume)
3804 rc = ap->ops->port_resume(ap);
3805
3806 /* tell ACPI that we're resuming */
3807 ata_acpi_on_resume(ap);
3808
3809 /* report result */
3810 spin_lock_irqsave(ap->lock, flags);
3811 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
3812 if (ap->pm_result) {
3813 *ap->pm_result = rc;
3814 ap->pm_result = NULL;
3815 }
3816 spin_unlock_irqrestore(ap->lock, flags);
3817 }
3818 #endif /* CONFIG_PM */