[SCSI] use one-element sg list in scsi_send_eh_cmnd
drivers/scsi/scsi_error.c
1 /*
2 * scsi_error.c Copyright (C) 1997 Eric Youngdale
3 *
4 * SCSI error/timeout handling
5 * Initial versions: Eric Youngdale. Based upon conversations with
6 * Leonard Zubkoff and David Miller at Linux Expo,
7 * ideas originating from all over the place.
8 *
9 * Restructured scsi_unjam_host and associated functions.
10 * September 04, 2002 Mike Anderson (andmike@us.ibm.com)
11 *
12 * Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
13 * minor cleanups.
14 * September 30, 2002 Mike Anderson (andmike@us.ibm.com)
15 */
16
17 #include <linux/module.h>
18 #include <linux/sched.h>
19 #include <linux/timer.h>
20 #include <linux/string.h>
21 #include <linux/slab.h>
22 #include <linux/kernel.h>
23 #include <linux/kthread.h>
24 #include <linux/interrupt.h>
25 #include <linux/blkdev.h>
26 #include <linux/delay.h>
27
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <scsi/scsi_dbg.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_eh.h>
33 #include <scsi/scsi_transport.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_ioctl.h>
36
37 #include "scsi_priv.h"
38 #include "scsi_logging.h"
39
40 #define SENSE_TIMEOUT (10*HZ)
41 #define START_UNIT_TIMEOUT (30*HZ)
42
43 /*
44 * These should *probably* be handled by the host itself.
45 * Since it is allowed to sleep, it probably should.
46 */
47 #define BUS_RESET_SETTLE_TIME (10)
48 #define HOST_RESET_SETTLE_TIME (10)
49
50 /* called with shost->host_lock held */
51 void scsi_eh_wakeup(struct Scsi_Host *shost)
52 {
53 if (shost->host_busy == shost->host_failed) {
54 wake_up_process(shost->ehandler);
55 SCSI_LOG_ERROR_RECOVERY(5,
56 printk("Waking error handler thread\n"));
57 }
58 }
59
60 /**
61 * scsi_schedule_eh - schedule EH for SCSI host
62 * @shost: SCSI host to invoke error handling on.
63 *
64 * Schedule SCSI EH without scmd.
65 **/
66 void scsi_schedule_eh(struct Scsi_Host *shost)
67 {
68 unsigned long flags;
69
70 spin_lock_irqsave(shost->host_lock, flags);
71
72 if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
73 scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
74 shost->host_eh_scheduled++;
75 scsi_eh_wakeup(shost);
76 }
77
78 spin_unlock_irqrestore(shost->host_lock, flags);
79 }
80 EXPORT_SYMBOL_GPL(scsi_schedule_eh);
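
/*
 * Editorial example, not part of the original file: a hypothetical LLDD
 * helper that schedules host-level recovery when the adapter is wedged
 * but no individual command has failed (so there is no scmd to hand to
 * scsi_eh_scmd_add()).  Excluded from the build; foo_* is made up.
 */
#if 0
static void foo_handle_fatal_fault(struct Scsi_Host *shost)
{
	/*
	 * scsi_schedule_eh() moves the host into a recovery state, bumps
	 * host_eh_scheduled and (via scsi_eh_wakeup()) wakes the error
	 * handler thread for this host.
	 */
	scsi_schedule_eh(shost);
}
#endif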
81
82 /**
83 * scsi_eh_scmd_add - add scsi cmd to error handling.
84 * @scmd: scmd to run eh on.
85 * @eh_flag: optional SCSI_EH flag.
86 *
87 * Return value:
88 * 0 on failure.
89 **/
90 int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
91 {
92 struct Scsi_Host *shost = scmd->device->host;
93 unsigned long flags;
94 int ret = 0;
95
96 if (!shost->ehandler)
97 return 0;
98
99 spin_lock_irqsave(shost->host_lock, flags);
100 if (scsi_host_set_state(shost, SHOST_RECOVERY))
101 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
102 goto out_unlock;
103
104 ret = 1;
105 scmd->eh_eflags |= eh_flag;
106 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
107 shost->host_failed++;
108 scsi_eh_wakeup(shost);
109 out_unlock:
110 spin_unlock_irqrestore(shost->host_lock, flags);
111 return ret;
112 }
113
114 /**
115 * scsi_add_timer - Start timeout timer for a single scsi command.
116 * @scmd: scsi command that is about to start running.
117 * @timeout: amount of time to allow this command to run.
118 * @complete: timeout function to call if timer isn't canceled.
119 *
120 * Notes:
121 * This should be turned into an inline function. Each scsi command
122 * has its own timer, and as it is added to the queue, we set up the
123 * timer. When the command completes, we cancel the timer.
124 **/
125 void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
126 void (*complete)(struct scsi_cmnd *))
127 {
128
129 /*
130 * If the clock was already running for this command, then
131 * first delete the timer. The timer handling code gets rather
132 * confused if we don't do this.
133 */
134 if (scmd->eh_timeout.function)
135 del_timer(&scmd->eh_timeout);
136
137 scmd->eh_timeout.data = (unsigned long)scmd;
138 scmd->eh_timeout.expires = jiffies + timeout;
139 scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
140
141 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
142 " %d, (%p)\n", __FUNCTION__,
143 scmd, timeout, complete));
144
145 add_timer(&scmd->eh_timeout);
146 }
147
148 /**
149 * scsi_delete_timer - Delete/cancel timer for a given function.
150 * @scmd: Cmd that we are canceling timer for
151 *
152 * Notes:
153 * This should be turned into an inline function.
154 *
155 * Return value:
156 * 1 if we were able to detach the timer. 0 if we blew it, and the
157 * timer function has already started to run.
158 **/
159 int scsi_delete_timer(struct scsi_cmnd *scmd)
160 {
161 int rtn;
162
163 rtn = del_timer(&scmd->eh_timeout);
164
165 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
166 " rtn: %d\n", __FUNCTION__,
167 scmd, rtn));
168
169 scmd->eh_timeout.data = (unsigned long)NULL;
170 scmd->eh_timeout.function = NULL;
171
172 return rtn;
173 }
174
175 /**
176 * scsi_times_out - Timeout function for normal scsi commands.
177 * @scmd: Cmd that is timing out.
178 *
179 * Notes:
180 * We do not need to lock this. There is the potential for a race
181 * only in that the normal completion handling might run, but if the
182 * normal completion function determines that the timer has already
183 * fired, then it mustn't do anything.
184 **/
185 void scsi_times_out(struct scsi_cmnd *scmd)
186 {
187 scsi_log_completion(scmd, TIMEOUT_ERROR);
188
189 if (scmd->device->host->transportt->eh_timed_out)
190 switch (scmd->device->host->transportt->eh_timed_out(scmd)) {
191 case EH_HANDLED:
192 __scsi_done(scmd);
193 return;
194 case EH_RESET_TIMER:
195 scsi_add_timer(scmd, scmd->timeout_per_command,
196 scsi_times_out);
197 return;
198 case EH_NOT_HANDLED:
199 break;
200 }
201
202 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
203 scmd->result |= DID_TIME_OUT << 16;
204 __scsi_done(scmd);
205 }
206 }
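
/*
 * Editorial example, not part of the original file: what a transport
 * class eh_timed_out() hook of this era might look like.  It is assumed
 * that the hook receives the timed-out scmd and returns one of the EH_*
 * values switched on in scsi_times_out() above; struct foo_port and
 * foo_port_of() are invented for illustration.  Excluded from the build.
 */
#if 0
static enum scsi_eh_timer_return foo_eh_timed_out(struct scsi_cmnd *scmd)
{
	struct foo_port *port = foo_port_of(scmd->device);

	if (port->link_bouncing)
		/* transient link problem: restart the command timer */
		return EH_RESET_TIMER;

	/* let the midlayer queue the command for normal error handling */
	return EH_NOT_HANDLED;
}
#endif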
207
208 /**
209 * scsi_block_when_processing_errors - Prevent cmds from being queued.
210 * @sdev: Device on which we are performing recovery.
211 *
212 * Description:
213 * We block until the host is out of error recovery, and then check to
214 * see whether the host or the device is offline.
215 *
216 * Return value:
217 * 0 when dev was taken offline by error recovery. 1 OK to proceed.
218 **/
219 int scsi_block_when_processing_errors(struct scsi_device *sdev)
220 {
221 int online;
222
223 wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
224
225 online = scsi_device_online(sdev);
226
227 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__,
228 online));
229
230 return online;
231 }
232 EXPORT_SYMBOL(scsi_block_when_processing_errors);
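
/*
 * Editorial example, not part of the original file: the usual gate a
 * character/ioctl path (sg, st and friends) places in front of command
 * submission, sleeping until error recovery finishes and bailing out if
 * the device did not survive it.  Excluded from the build.
 */
#if 0
static int foo_ioctl_prep(struct scsi_device *sdev)
{
	if (!scsi_block_when_processing_errors(sdev))
		return -ENXIO;	/* recovery took the device offline */
	return 0;
}
#endif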
233
234 #ifdef CONFIG_SCSI_LOGGING
235 /**
236 * scsi_eh_prt_fail_stats - Log info on failures.
237 * @shost: scsi host being recovered.
238 * @work_q: Queue of scsi cmds to process.
239 **/
240 static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
241 struct list_head *work_q)
242 {
243 struct scsi_cmnd *scmd;
244 struct scsi_device *sdev;
245 int total_failures = 0;
246 int cmd_failed = 0;
247 int cmd_cancel = 0;
248 int devices_failed = 0;
249
250 shost_for_each_device(sdev, shost) {
251 list_for_each_entry(scmd, work_q, eh_entry) {
252 if (scmd->device == sdev) {
253 ++total_failures;
254 if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
255 ++cmd_cancel;
256 else
257 ++cmd_failed;
258 }
259 }
260
261 if (cmd_cancel || cmd_failed) {
262 SCSI_LOG_ERROR_RECOVERY(3,
263 sdev_printk(KERN_INFO, sdev,
264 "%s: cmds failed: %d, cancel: %d\n",
265 __FUNCTION__, cmd_failed,
266 cmd_cancel));
267 cmd_cancel = 0;
268 cmd_failed = 0;
269 ++devices_failed;
270 }
271 }
272
273 SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
274 " devices require eh work\n",
275 total_failures, devices_failed));
276 }
277 #endif
278
279 /**
280 * scsi_check_sense - Examine scsi cmd sense
281 * @scmd: Cmd to have sense checked.
282 *
283 * Return value:
284 * SUCCESS or FAILED or NEEDS_RETRY
285 *
286 * Notes:
287 * When a deferred error is detected the current command has
288 * not been executed and needs retrying.
289 **/
290 static int scsi_check_sense(struct scsi_cmnd *scmd)
291 {
292 struct scsi_sense_hdr sshdr;
293
294 if (! scsi_command_normalize_sense(scmd, &sshdr))
295 return FAILED; /* no valid sense data */
296
297 if (scsi_sense_is_deferred(&sshdr))
298 return NEEDS_RETRY;
299
300 /*
301 * Previous logic looked for FILEMARK, EOM or ILI which are
302 * mainly associated with tapes and returned SUCCESS.
303 */
304 if (sshdr.response_code == 0x70) {
305 /* fixed format */
306 if (scmd->sense_buffer[2] & 0xe0)
307 return SUCCESS;
308 } else {
309 /*
310 * descriptor format: look for "stream commands sense data
311 * descriptor" (see SSC-3). Assume single sense data
312 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
313 */
314 if ((sshdr.additional_length > 3) &&
315 (scmd->sense_buffer[8] == 0x4) &&
316 (scmd->sense_buffer[11] & 0xe0))
317 return SUCCESS;
318 }
319
320 switch (sshdr.sense_key) {
321 case NO_SENSE:
322 return SUCCESS;
323 case RECOVERED_ERROR:
324 return /* soft_error */ SUCCESS;
325
326 case ABORTED_COMMAND:
327 return NEEDS_RETRY;
328 case NOT_READY:
329 case UNIT_ATTENTION:
330 /*
331 * if we are expecting a cc/ua because of a bus reset that we
332 * performed, treat this just as a retry. otherwise this is
333 * information that we should pass up to the upper-level driver
334 * so that we can deal with it there.
335 */
336 if (scmd->device->expecting_cc_ua) {
337 scmd->device->expecting_cc_ua = 0;
338 return NEEDS_RETRY;
339 }
340 /*
341 * if the device is in the process of becoming ready, we
342 * should retry.
343 */
344 if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
345 return NEEDS_RETRY;
346 /*
347 * if the device is not started, we need to wake
348 * the error handler to start the motor
349 */
350 if (scmd->device->allow_restart &&
351 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
352 return FAILED;
353 return SUCCESS;
354
355 /* these three are not supported */
356 case COPY_ABORTED:
357 case VOLUME_OVERFLOW:
358 case MISCOMPARE:
359 return SUCCESS;
360
361 case MEDIUM_ERROR:
362 return NEEDS_RETRY;
363
364 case HARDWARE_ERROR:
365 if (scmd->device->retry_hwerror)
366 return NEEDS_RETRY;
367 else
368 return SUCCESS;
369
370 case ILLEGAL_REQUEST:
371 case BLANK_CHECK:
372 case DATA_PROTECT:
373 default:
374 return SUCCESS;
375 }
376 }
377
378 /**
379  * scsi_eh_completed_normally - Disposition an eh cmd on return from LLD.
380 * @scmd: SCSI cmd to examine.
381 *
382 * Notes:
383 * This is *only* called when we are examining the status of commands
384 * queued during error recovery. the main difference here is that we
385 * don't allow for the possibility of retries here, and we are a lot
386 * more restrictive about what we consider acceptable.
387 **/
388 static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
389 {
390 /*
391 * first check the host byte, to see if there is anything in there
392 * that would indicate what we need to do.
393 */
394 if (host_byte(scmd->result) == DID_RESET) {
395 /*
396 * rats. we are already in the error handler, so we now
397 * get to try and figure out what to do next. if the sense
398 * is valid, we have a pretty good idea of what to do.
399 * if not, we mark it as FAILED.
400 */
401 return scsi_check_sense(scmd);
402 }
403 if (host_byte(scmd->result) != DID_OK)
404 return FAILED;
405
406 /*
407 * next, check the message byte.
408 */
409 if (msg_byte(scmd->result) != COMMAND_COMPLETE)
410 return FAILED;
411
412 /*
413 * now, check the status byte to see if this indicates
414 * anything special.
415 */
416 switch (status_byte(scmd->result)) {
417 case GOOD:
418 case COMMAND_TERMINATED:
419 return SUCCESS;
420 case CHECK_CONDITION:
421 return scsi_check_sense(scmd);
422 case CONDITION_GOOD:
423 case INTERMEDIATE_GOOD:
424 case INTERMEDIATE_C_GOOD:
425 /*
426 * who knows? FIXME(eric)
427 */
428 return SUCCESS;
429 case BUSY:
430 case QUEUE_FULL:
431 case RESERVATION_CONFLICT:
432 default:
433 return FAILED;
434 }
435 return FAILED;
436 }
437
438 /**
439 * scsi_eh_done - Completion function for error handling.
440 * @scmd: Cmd that is done.
441 **/
442 static void scsi_eh_done(struct scsi_cmnd *scmd)
443 {
444 struct completion *eh_action;
445
446 SCSI_LOG_ERROR_RECOVERY(3,
447 printk("%s scmd: %p result: %x\n",
448 __FUNCTION__, scmd, scmd->result));
449
450 eh_action = scmd->device->host->eh_action;
451 if (eh_action)
452 complete(eh_action);
453 }
454
455 /**
456  * scsi_send_eh_cmnd - submit a scsi command as part of error recovery
457 * @scmd: SCSI command structure to hijack
458 * @cmnd: CDB to send
459 * @cmnd_size: size in bytes of @cmnd
460 * @timeout: timeout for this request
461 * @copy_sense: request sense data if set to 1
462 *
463 * This function is used to send a scsi command down to a target device
464 * as part of the error recovery process. If @copy_sense is 0 the command
465 * sent must be one that does not transfer any data. If @copy_sense is 1
466  * the command must be REQUEST_SENSE and this function copies out the
467 * sense buffer it got into @scmd->sense_buffer.
468 *
469 * Return value:
470 * SUCCESS or FAILED or NEEDS_RETRY
471 **/
472 static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
473 int cmnd_size, int timeout, int copy_sense)
474 {
475 struct scsi_device *sdev = scmd->device;
476 struct Scsi_Host *shost = sdev->host;
477 int old_result = scmd->result;
478 DECLARE_COMPLETION_ONSTACK(done);
479 unsigned long timeleft;
480 unsigned long flags;
481 struct scatterlist sgl;
482 unsigned char old_cmnd[MAX_COMMAND_SIZE];
483 enum dma_data_direction old_data_direction;
484 unsigned short old_use_sg;
485 unsigned char old_cmd_len;
486 unsigned old_bufflen;
487 void *old_buffer;
488 int rtn;
489
490 /*
491 * We need saved copies of a number of fields - this is because
492 * error handling may need to overwrite these with different values
493 * to run different commands, and once error handling is complete,
494 * we will need to restore these values prior to running the actual
495 * command.
496 */
497 old_buffer = scmd->request_buffer;
498 old_bufflen = scmd->request_bufflen;
499 memcpy(old_cmnd, scmd->cmnd, sizeof(scmd->cmnd));
500 old_data_direction = scmd->sc_data_direction;
501 old_cmd_len = scmd->cmd_len;
502 old_use_sg = scmd->use_sg;
503
504 memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
505 memcpy(scmd->cmnd, cmnd, cmnd_size);
506
507 if (copy_sense) {
508 gfp_t gfp_mask = GFP_ATOMIC;
509
510 if (shost->hostt->unchecked_isa_dma)
511 gfp_mask |= __GFP_DMA;
512
513 sgl.page = alloc_page(gfp_mask);
514 if (!sgl.page)
515 return FAILED;
516 sgl.offset = 0;
517 sgl.length = 252;
518
519 scmd->sc_data_direction = DMA_FROM_DEVICE;
520 scmd->request_bufflen = sgl.length;
521 scmd->request_buffer = &sgl;
522 scmd->use_sg = 1;
523 } else {
524 scmd->request_buffer = NULL;
525 scmd->request_bufflen = 0;
526 scmd->sc_data_direction = DMA_NONE;
527 scmd->use_sg = 0;
528 }
529
530 scmd->underflow = 0;
531 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
532
533 if (sdev->scsi_level <= SCSI_2)
534 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
535 (sdev->lun << 5 & 0xe0);
536
537 /*
538 * Zero the sense buffer. The scsi spec mandates that any
539 * untransferred sense data should be interpreted as being zero.
540 */
541 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
542
543 shost->eh_action = &done;
544
545 spin_lock_irqsave(shost->host_lock, flags);
546 scsi_log_send(scmd);
547 shost->hostt->queuecommand(scmd, scsi_eh_done);
548 spin_unlock_irqrestore(shost->host_lock, flags);
549
550 timeleft = wait_for_completion_timeout(&done, timeout);
551
552 shost->eh_action = NULL;
553
554 scsi_log_completion(scmd, SUCCESS);
555
556 SCSI_LOG_ERROR_RECOVERY(3,
557 printk("%s: scmd: %p, timeleft: %ld\n",
558 __FUNCTION__, scmd, timeleft));
559
560 /*
561  * If there is time left, scsi_eh_done got called, and we will
562 * examine the actual status codes to see whether the command
563 * actually did complete normally, else tell the host to forget
564 * about this command.
565 */
566 if (timeleft) {
567 rtn = scsi_eh_completed_normally(scmd);
568 SCSI_LOG_ERROR_RECOVERY(3,
569 printk("%s: scsi_eh_completed_normally %x\n",
570 __FUNCTION__, rtn));
571
572 switch (rtn) {
573 case SUCCESS:
574 case NEEDS_RETRY:
575 case FAILED:
576 break;
577 default:
578 rtn = FAILED;
579 break;
580 }
581 } else {
582 /*
583 * FIXME(eric) - we are not tracking whether we could
584 * abort a timed out command or not. not sure how
585 * we should treat them differently anyways.
586 */
587 if (shost->hostt->eh_abort_handler)
588 shost->hostt->eh_abort_handler(scmd);
589 rtn = FAILED;
590 }
591
592
593 /*
594 * Last chance to have valid sense data.
595 */
596 if (copy_sense) {
597 if (!SCSI_SENSE_VALID(scmd)) {
598 memcpy(scmd->sense_buffer, scmd->request_buffer,
599 sizeof(scmd->sense_buffer));
600 }
601 __free_page(sgl.page);
602 }
603
604
605 /*
606 * Restore original data
607 */
608 scmd->request_buffer = old_buffer;
609 scmd->request_bufflen = old_bufflen;
610 memcpy(scmd->cmnd, old_cmnd, sizeof(scmd->cmnd));
611 scmd->sc_data_direction = old_data_direction;
612 scmd->cmd_len = old_cmd_len;
613 scmd->use_sg = old_use_sg;
614 scmd->result = old_result;
615 return rtn;
616 }
617
618 /**
619 * scsi_request_sense - Request sense data from a particular target.
620 * @scmd: SCSI cmd for request sense.
621 *
622 * Notes:
623 * Some hosts automatically obtain this information, others require
624 * that we obtain it on our own. This function will *not* return until
625 * the command either times out, or it completes.
626 **/
627 static int scsi_request_sense(struct scsi_cmnd *scmd)
628 {
629 static unsigned char generic_sense[6] =
630 {REQUEST_SENSE, 0, 0, 0, 252, 0};
631
632 return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1);
633 }
634
635 /**
636 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
637 * @scmd: Original SCSI cmd that eh has finished.
638 * @done_q: Queue for processed commands.
639 *
640 * Notes:
641  * We don't want to use the normal command completion while we are
642 * still handling errors - it may cause other commands to be queued,
643 * and that would disturb what we are doing. thus we really want to
644 * keep a list of pending commands for final completion, and once we
645 * are ready to leave error handling we handle completion for real.
646 **/
647 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
648 {
649 scmd->device->host->host_failed--;
650 scmd->eh_eflags = 0;
651 list_move_tail(&scmd->eh_entry, done_q);
652 }
653 EXPORT_SYMBOL(scsi_eh_finish_cmd);
654
655 /**
656 * scsi_eh_get_sense - Get device sense data.
657 * @work_q: Queue of commands to process.
658  * @done_q: Queue of processed commands.
659 *
660 * Description:
661 * See if we need to request sense information. if so, then get it
662 * now, so we have a better idea of what to do.
663 *
664 * Notes:
665  * This has the unfortunate side effect that if a shost adapter does
666  * not automatically request sense information, we end up shutting
667  * it down before we request it.
668 *
669 * All drivers should request sense information internally these days,
670 * so for now all I have to say is tough noogies if you end up in here.
671 *
672 * XXX: Long term this code should go away, but that needs an audit of
673 * all LLDDs first.
674 **/
675 static int scsi_eh_get_sense(struct list_head *work_q,
676 struct list_head *done_q)
677 {
678 struct scsi_cmnd *scmd, *next;
679 int rtn;
680
681 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
682 if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
683 SCSI_SENSE_VALID(scmd))
684 continue;
685
686 SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
687 "%s: requesting sense\n",
688 current->comm));
689 rtn = scsi_request_sense(scmd);
690 if (rtn != SUCCESS)
691 continue;
692
693 SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p"
694 " result %x\n", scmd,
695 scmd->result));
696 SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd));
697
698 rtn = scsi_decide_disposition(scmd);
699
700 /*
701 * if the result was normal, then just pass it along to the
702 * upper level.
703 */
704 if (rtn == SUCCESS)
705 /* we don't want this command reissued, just
706 * finished with the sense data, so set
707 * retries to the max allowed to ensure it
708 * won't get reissued */
709 scmd->retries = scmd->allowed;
710 else if (rtn != NEEDS_RETRY)
711 continue;
712
713 scsi_eh_finish_cmd(scmd, done_q);
714 }
715
716 return list_empty(work_q);
717 }
718
719 /**
720 * scsi_try_to_abort_cmd - Ask host to abort a running command.
721 * @scmd: SCSI cmd to abort from Lower Level.
722 *
723 * Notes:
724 * This function will not return until the user's completion function
725 * has been called. there is no timeout on this operation. if the
726 * author of the low-level driver wishes this operation to be timed,
727 * they can provide this facility themselves. helper functions in
728 * scsi_error.c can be supplied to make this easier to do.
729 **/
730 static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
731 {
732 if (!scmd->device->host->hostt->eh_abort_handler)
733 return FAILED;
734
735 /*
736 * scsi_done was called just after the command timed out and before
737 * we had a chance to process it. (db)
738 */
739 if (scmd->serial_number == 0)
740 return SUCCESS;
741 return scmd->device->host->hostt->eh_abort_handler(scmd);
742 }
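
/*
 * Editorial example, not part of the original file: the shape of an
 * LLDD eh_abort_handler() as called above.  It must not return until the
 * abort has been resolved one way or the other; foo_hba and
 * foo_abort_tag_and_wait() are invented for illustration.  Excluded
 * from the build.
 */
#if 0
static int foo_eh_abort_handler(struct scsi_cmnd *scmd)
{
	struct foo_hba *hba = (struct foo_hba *)scmd->device->host->hostdata;

	/* ask the firmware to abort this command and wait for its answer */
	if (foo_abort_tag_and_wait(hba, scmd))
		return FAILED;
	return SUCCESS;
}
#endif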
743
744 /**
745 * scsi_eh_tur - Send TUR to device.
746 * @scmd: Scsi cmd to send TUR
747 *
748 * Return value:
749 * 0 - Device is ready. 1 - Device NOT ready.
750 **/
751 static int scsi_eh_tur(struct scsi_cmnd *scmd)
752 {
753 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
754 int retry_cnt = 1, rtn;
755
756 retry_tur:
757 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
758
759 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
760 __FUNCTION__, scmd, rtn));
761
762 switch (rtn) {
763 case NEEDS_RETRY:
764 if (retry_cnt--)
765 goto retry_tur;
766 /*FALLTHRU*/
767 case SUCCESS:
768 return 0;
769 default:
770 return 1;
771 }
772 }
773
774 /**
775 * scsi_eh_abort_cmds - abort canceled commands.
776  * @work_q: list_head of cmds to try to abort.
777  * @done_q: list_head for processed commands.
778 *
779  * Description:
780 * Try and see whether or not it makes sense to try and abort the
781 * running command. this only works out to be the case if we have one
782 * command that has timed out. if the command simply failed, it makes
783 * no sense to try and abort the command, since as far as the shost
784 * adapter is concerned, it isn't running.
785 **/
786 static int scsi_eh_abort_cmds(struct list_head *work_q,
787 struct list_head *done_q)
788 {
789 struct scsi_cmnd *scmd, *next;
790 int rtn;
791
792 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
793 if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
794 continue;
795 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
796 "0x%p\n", current->comm,
797 scmd));
798 rtn = scsi_try_to_abort_cmd(scmd);
799 if (rtn == SUCCESS) {
800 scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
801 if (!scsi_device_online(scmd->device) ||
802 !scsi_eh_tur(scmd)) {
803 scsi_eh_finish_cmd(scmd, done_q);
804 }
805
806 } else
807 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
808 " cmd failed:"
809 "0x%p\n",
810 current->comm,
811 scmd));
812 }
813
814 return list_empty(work_q);
815 }
816
817 /**
818 * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
819 * @scmd: SCSI cmd used to send BDR
820 *
821 * Notes:
822 * There is no timeout for this operation. if this operation is
823 * unreliable for a given host, then the host itself needs to put a
824 * timer on it, and set the host back to a consistent state prior to
825 * returning.
826 **/
827 static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
828 {
829 int rtn;
830
831 if (!scmd->device->host->hostt->eh_device_reset_handler)
832 return FAILED;
833
834 rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd);
835 if (rtn == SUCCESS) {
836 scmd->device->was_reset = 1;
837 scmd->device->expecting_cc_ua = 1;
838 }
839
840 return rtn;
841 }
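
/*
 * Editorial example, not part of the original file: a matching LLDD
 * eh_device_reset_handler().  Note from the code above that on SUCCESS
 * the midlayer itself marks the device was_reset and expecting_cc_ua,
 * so the driver only has to perform the reset.  foo_hba and
 * foo_lun_reset_and_wait() are invented for illustration; excluded
 * from the build.
 */
#if 0
static int foo_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
	struct foo_hba *hba = (struct foo_hba *)scmd->device->host->hostdata;

	if (foo_lun_reset_and_wait(hba, scmd->device))
		return FAILED;
	return SUCCESS;
}
#endif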
842
843 /**
844 * scsi_eh_try_stu - Send START_UNIT to device.
845 * @scmd: Scsi cmd to send START_UNIT
846 *
847 * Return value:
848 * 0 - Device is ready. 1 - Device NOT ready.
849 **/
850 static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
851 {
852 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
853
854 if (scmd->device->allow_restart) {
855 int rtn;
856
857 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
858 START_UNIT_TIMEOUT, 0);
859 if (rtn == SUCCESS)
860 return 0;
861 }
862
863 return 1;
864 }
865
866 /**
867 * scsi_eh_stu - send START_UNIT if needed
868 * @shost: scsi host being recovered.
869  * @done_q: list_head for processed commands.
870 *
871 * Notes:
872  * If commands are failing with NOT READY, INITIALIZING COMMAND REQUIRED
873  * sense, send the device a START UNIT to get it started up again.
874 **/
875 static int scsi_eh_stu(struct Scsi_Host *shost,
876 struct list_head *work_q,
877 struct list_head *done_q)
878 {
879 struct scsi_cmnd *scmd, *stu_scmd, *next;
880 struct scsi_device *sdev;
881
882 shost_for_each_device(sdev, shost) {
883 stu_scmd = NULL;
884 list_for_each_entry(scmd, work_q, eh_entry)
885 if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
886 scsi_check_sense(scmd) == FAILED ) {
887 stu_scmd = scmd;
888 break;
889 }
890
891 if (!stu_scmd)
892 continue;
893
894 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
895 " 0x%p\n", current->comm, sdev));
896
897 if (!scsi_eh_try_stu(stu_scmd)) {
898 if (!scsi_device_online(sdev) ||
899 !scsi_eh_tur(stu_scmd)) {
900 list_for_each_entry_safe(scmd, next,
901 work_q, eh_entry) {
902 if (scmd->device == sdev)
903 scsi_eh_finish_cmd(scmd, done_q);
904 }
905 }
906 } else {
907 SCSI_LOG_ERROR_RECOVERY(3,
908 printk("%s: START_UNIT failed to sdev:"
909 " 0x%p\n", current->comm, sdev));
910 }
911 }
912
913 return list_empty(work_q);
914 }
915
916
917 /**
918 * scsi_eh_bus_device_reset - send bdr if needed
919 * @shost: scsi host being recovered.
920  * @done_q: list_head for processed commands.
921 *
922 * Notes:
923 * Try a bus device reset. still, look to see whether we have multiple
924 * devices that are jammed or not - if we have multiple devices, it
925 * makes no sense to try bus_device_reset - we really would need to try
926 * a bus_reset instead.
927 **/
928 static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
929 struct list_head *work_q,
930 struct list_head *done_q)
931 {
932 struct scsi_cmnd *scmd, *bdr_scmd, *next;
933 struct scsi_device *sdev;
934 int rtn;
935
936 shost_for_each_device(sdev, shost) {
937 bdr_scmd = NULL;
938 list_for_each_entry(scmd, work_q, eh_entry)
939 if (scmd->device == sdev) {
940 bdr_scmd = scmd;
941 break;
942 }
943
944 if (!bdr_scmd)
945 continue;
946
947 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
948 " 0x%p\n", current->comm,
949 sdev));
950 rtn = scsi_try_bus_device_reset(bdr_scmd);
951 if (rtn == SUCCESS) {
952 if (!scsi_device_online(sdev) ||
953 !scsi_eh_tur(bdr_scmd)) {
954 list_for_each_entry_safe(scmd, next,
955 work_q, eh_entry) {
956 if (scmd->device == sdev)
957 scsi_eh_finish_cmd(scmd,
958 done_q);
959 }
960 }
961 } else {
962 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
963 " failed sdev:"
964 "0x%p\n",
965 current->comm,
966 sdev));
967 }
968 }
969
970 return list_empty(work_q);
971 }
972
973 /**
974 * scsi_try_bus_reset - ask host to perform a bus reset
975 * @scmd: SCSI cmd to send bus reset.
976 **/
977 static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
978 {
979 unsigned long flags;
980 int rtn;
981
982 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
983 __FUNCTION__));
984
985 if (!scmd->device->host->hostt->eh_bus_reset_handler)
986 return FAILED;
987
988 rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd);
989
990 if (rtn == SUCCESS) {
991 if (!scmd->device->host->hostt->skip_settle_delay)
992 ssleep(BUS_RESET_SETTLE_TIME);
993 spin_lock_irqsave(scmd->device->host->host_lock, flags);
994 scsi_report_bus_reset(scmd->device->host,
995 scmd_channel(scmd));
996 spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
997 }
998
999 return rtn;
1000 }
1001
1002 /**
1003 * scsi_try_host_reset - ask host adapter to reset itself
1004  * @scmd: SCSI cmd to send host reset.
1005 **/
1006 static int scsi_try_host_reset(struct scsi_cmnd *scmd)
1007 {
1008 unsigned long flags;
1009 int rtn;
1010
1011 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
1012 __FUNCTION__));
1013
1014 if (!scmd->device->host->hostt->eh_host_reset_handler)
1015 return FAILED;
1016
1017 rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd);
1018
1019 if (rtn == SUCCESS) {
1020 if (!scmd->device->host->hostt->skip_settle_delay)
1021 ssleep(HOST_RESET_SETTLE_TIME);
1022 spin_lock_irqsave(scmd->device->host->host_lock, flags);
1023 scsi_report_bus_reset(scmd->device->host,
1024 scmd_channel(scmd));
1025 spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
1026 }
1027
1028 return rtn;
1029 }
1030
1031 /**
1032 * scsi_eh_bus_reset - send a bus reset
1033 * @shost: scsi host being recovered.
1034  * @done_q: list_head for processed commands.
1035 **/
1036 static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1037 struct list_head *work_q,
1038 struct list_head *done_q)
1039 {
1040 struct scsi_cmnd *scmd, *chan_scmd, *next;
1041 unsigned int channel;
1042 int rtn;
1043
1044 /*
1045 * we really want to loop over the various channels, and do this on
1046 * a channel by channel basis. we should also check to see if any
1047 * of the failed commands are on soft_reset devices, and if so, skip
1048 * the reset.
1049 */
1050
1051 for (channel = 0; channel <= shost->max_channel; channel++) {
1052 chan_scmd = NULL;
1053 list_for_each_entry(scmd, work_q, eh_entry) {
1054 if (channel == scmd_channel(scmd)) {
1055 chan_scmd = scmd;
1056 break;
1057 /*
1058 * FIXME add back in some support for
1059 * soft_reset devices.
1060 */
1061 }
1062 }
1063
1064 if (!chan_scmd)
1065 continue;
1066 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
1067 " %d\n", current->comm,
1068 channel));
1069 rtn = scsi_try_bus_reset(chan_scmd);
1070 if (rtn == SUCCESS) {
1071 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1072 if (channel == scmd_channel(scmd))
1073 if (!scsi_device_online(scmd->device) ||
1074 !scsi_eh_tur(scmd))
1075 scsi_eh_finish_cmd(scmd,
1076 done_q);
1077 }
1078 } else {
1079 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
1080 " failed chan: %d\n",
1081 current->comm,
1082 channel));
1083 }
1084 }
1085 return list_empty(work_q);
1086 }
1087
1088 /**
1089 * scsi_eh_host_reset - send a host reset
1090  * @work_q: list_head of failed commands.
1091 * @done_q: list_head for processed commands.
1092 **/
1093 static int scsi_eh_host_reset(struct list_head *work_q,
1094 struct list_head *done_q)
1095 {
1096 struct scsi_cmnd *scmd, *next;
1097 int rtn;
1098
1099 if (!list_empty(work_q)) {
1100 scmd = list_entry(work_q->next,
1101 struct scsi_cmnd, eh_entry);
1102
1103 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
1104 , current->comm));
1105
1106 rtn = scsi_try_host_reset(scmd);
1107 if (rtn == SUCCESS) {
1108 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1109 if (!scsi_device_online(scmd->device) ||
1110 (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
1111 !scsi_eh_tur(scmd))
1112 scsi_eh_finish_cmd(scmd, done_q);
1113 }
1114 } else {
1115 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
1116 " failed\n",
1117 current->comm));
1118 }
1119 }
1120 return list_empty(work_q);
1121 }
1122
1123 /**
1124 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
1125 * @work_q: list_head for processed commands.
1126 * @done_q: list_head for processed commands.
1127 *
1128 **/
1129 static void scsi_eh_offline_sdevs(struct list_head *work_q,
1130 struct list_head *done_q)
1131 {
1132 struct scsi_cmnd *scmd, *next;
1133
1134 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1135 sdev_printk(KERN_INFO, scmd->device,
1136 "scsi: Device offlined - not"
1137 " ready after error recovery\n");
1138 scsi_device_set_state(scmd->device, SDEV_OFFLINE);
1139 if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
1140 /*
1141 * FIXME: Handle lost cmds.
1142 */
1143 }
1144 scsi_eh_finish_cmd(scmd, done_q);
1145 }
1146 return;
1147 }
1148
1149 /**
1150 * scsi_decide_disposition - Disposition a cmd on return from LLD.
1151 * @scmd: SCSI cmd to examine.
1152 *
1153 * Notes:
1154 * This is *only* called when we are examining the status after sending
1155 * out the actual data command. any commands that are queued for error
1156 * recovery (e.g. test_unit_ready) do *not* come through here.
1157 *
1158 * When this routine returns failed, it means the error handler thread
1159 * is woken. In cases where the error code indicates an error that
1160  * doesn't require the error handler (i.e. we don't need to
1161 * abort/reset), this function should return SUCCESS.
1162 **/
1163 int scsi_decide_disposition(struct scsi_cmnd *scmd)
1164 {
1165 int rtn;
1166
1167 /*
1168 * if the device is offline, then we clearly just pass the result back
1169 * up to the top level.
1170 */
1171 if (!scsi_device_online(scmd->device)) {
1172 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
1173 " as SUCCESS\n",
1174 __FUNCTION__));
1175 return SUCCESS;
1176 }
1177
1178 /*
1179 * first check the host byte, to see if there is anything in there
1180 * that would indicate what we need to do.
1181 */
1182 switch (host_byte(scmd->result)) {
1183 case DID_PASSTHROUGH:
1184 /*
1185 * no matter what, pass this through to the upper layer.
1186 * nuke this special code so that it looks like we are saying
1187 * did_ok.
1188 */
1189 scmd->result &= 0xff00ffff;
1190 return SUCCESS;
1191 case DID_OK:
1192 /*
1193 * looks good. drop through, and check the next byte.
1194 */
1195 break;
1196 case DID_NO_CONNECT:
1197 case DID_BAD_TARGET:
1198 case DID_ABORT:
1199 /*
1200 * note - this means that we just report the status back
1201 * to the top level driver, not that we actually think
1202 * that it indicates SUCCESS.
1203 */
1204 return SUCCESS;
1205 /*
1206 * when the low level driver returns did_soft_error,
1207 * it is responsible for keeping an internal retry counter
1208 * in order to avoid endless loops (db)
1209 *
1210 * actually this is a bug in this function here. we should
1211 * be mindful of the maximum number of retries specified
1212 * and not get stuck in a loop.
1213 */
1214 case DID_SOFT_ERROR:
1215 goto maybe_retry;
1216 case DID_IMM_RETRY:
1217 return NEEDS_RETRY;
1218
1219 case DID_REQUEUE:
1220 return ADD_TO_MLQUEUE;
1221
1222 case DID_ERROR:
1223 if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1224 status_byte(scmd->result) == RESERVATION_CONFLICT)
1225 /*
1226 * execute reservation conflict processing code
1227 * lower down
1228 */
1229 break;
1230 /* fallthrough */
1231
1232 case DID_BUS_BUSY:
1233 case DID_PARITY:
1234 goto maybe_retry;
1235 case DID_TIME_OUT:
1236 /*
1237 * when we scan the bus, we get timeout messages for
1238 * these commands if there is no device available.
1239 * other hosts report did_no_connect for the same thing.
1240 */
1241 if ((scmd->cmnd[0] == TEST_UNIT_READY ||
1242 scmd->cmnd[0] == INQUIRY)) {
1243 return SUCCESS;
1244 } else {
1245 return FAILED;
1246 }
1247 case DID_RESET:
1248 return SUCCESS;
1249 default:
1250 return FAILED;
1251 }
1252
1253 /*
1254 * next, check the message byte.
1255 */
1256 if (msg_byte(scmd->result) != COMMAND_COMPLETE)
1257 return FAILED;
1258
1259 /*
1260 * check the status byte to see if this indicates anything special.
1261 */
1262 switch (status_byte(scmd->result)) {
1263 case QUEUE_FULL:
1264 /*
1265 * the case of trying to send too many commands to a
1266 * tagged queueing device.
1267 */
1268 case BUSY:
1269 /*
1270 * device can't talk to us at the moment. Should only
1271 * occur (SAM-3) when the task queue is empty, so will cause
1272 * the empty queue handling to trigger a stall in the
1273 * device.
1274 */
1275 return ADD_TO_MLQUEUE;
1276 case GOOD:
1277 case COMMAND_TERMINATED:
1278 case TASK_ABORTED:
1279 return SUCCESS;
1280 case CHECK_CONDITION:
1281 rtn = scsi_check_sense(scmd);
1282 if (rtn == NEEDS_RETRY)
1283 goto maybe_retry;
1284 /* if rtn == FAILED, we have no sense information;
1285 * returning FAILED will wake the error handler thread
1286 * to collect the sense and redo the decide
1287 * disposition */
1288 return rtn;
1289 case CONDITION_GOOD:
1290 case INTERMEDIATE_GOOD:
1291 case INTERMEDIATE_C_GOOD:
1292 case ACA_ACTIVE:
1293 /*
1294 * who knows? FIXME(eric)
1295 */
1296 return SUCCESS;
1297
1298 case RESERVATION_CONFLICT:
1299 sdev_printk(KERN_INFO, scmd->device,
1300 "reservation conflict\n");
1301 return SUCCESS; /* causes immediate i/o error */
1302 default:
1303 return FAILED;
1304 }
1305 return FAILED;
1306
1307 maybe_retry:
1308
1309 /* we requeue for retry because the error was retryable, and
1310 * the request was not marked fast fail. Note that above,
1311 * even if the request is marked fast fail, we still requeue
1312 * for queue congestion conditions (QUEUE_FULL or BUSY) */
1313 if ((++scmd->retries) <= scmd->allowed
1314 && !blk_noretry_request(scmd->request)) {
1315 return NEEDS_RETRY;
1316 } else {
1317 /*
1318 * no more retries - report this one back to upper level.
1319 */
1320 return SUCCESS;
1321 }
1322 }
1323
1324 /**
1325 * scsi_eh_lock_door - Prevent medium removal for the specified device
1326 * @sdev: SCSI device to prevent medium removal
1327 *
1328 * Locking:
1329  * We must be called from process context; scsi_execute_async()
1330 * may sleep.
1331 *
1332 * Notes:
1333 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
1334 * head of the devices request queue, and continue.
1335 *
1336 * Bugs:
1337  * scsi_execute_async() may sleep waiting for existing requests to
1338 * be processed. However, since we haven't kicked off any request
1339 * processing for this host, this may deadlock.
1340 *
1341  * If scsi_execute_async() fails for whatever reason, we
1342 * completely forget to lock the door.
1343 **/
1344 static void scsi_eh_lock_door(struct scsi_device *sdev)
1345 {
1346 unsigned char cmnd[MAX_COMMAND_SIZE];
1347
1348 cmnd[0] = ALLOW_MEDIUM_REMOVAL;
1349 cmnd[1] = 0;
1350 cmnd[2] = 0;
1351 cmnd[3] = 0;
1352 cmnd[4] = SCSI_REMOVAL_PREVENT;
1353 cmnd[5] = 0;
1354
1355 scsi_execute_async(sdev, cmnd, 6, DMA_NONE, NULL, 0, 0, 10 * HZ,
1356 5, NULL, NULL, GFP_KERNEL);
1357 }
1358
1359
1360 /**
1361 * scsi_restart_operations - restart io operations to the specified host.
1362 * @shost: Host we are restarting.
1363 *
1364 * Notes:
1365 * When we entered the error handler, we blocked all further i/o to
1366 * this device. we need to 'reverse' this process.
1367 **/
1368 static void scsi_restart_operations(struct Scsi_Host *shost)
1369 {
1370 struct scsi_device *sdev;
1371 unsigned long flags;
1372
1373 /*
1374 * If the door was locked, we need to insert a door lock request
1375 * onto the head of the SCSI request queue for the device. There
1376 * is no point trying to lock the door of an off-line device.
1377 */
1378 shost_for_each_device(sdev, shost) {
1379 if (scsi_device_online(sdev) && sdev->locked)
1380 scsi_eh_lock_door(sdev);
1381 }
1382
1383 /*
1384 * next free up anything directly waiting upon the host. this
1385 * will be requests for character device operations, and also for
1386 * ioctls to queued block devices.
1387 */
1388 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
1389 __FUNCTION__));
1390
1391 spin_lock_irqsave(shost->host_lock, flags);
1392 if (scsi_host_set_state(shost, SHOST_RUNNING))
1393 if (scsi_host_set_state(shost, SHOST_CANCEL))
1394 BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
1395 spin_unlock_irqrestore(shost->host_lock, flags);
1396
1397 wake_up(&shost->host_wait);
1398
1399 /*
1400 * finally we need to re-initiate requests that may be pending. we will
1401 * have had everything blocked while error handling is taking place, and
1402 * now that error recovery is done, we will need to ensure that these
1403 * requests are started.
1404 */
1405 scsi_run_host_queues(shost);
1406 }
1407
1408 /**
1409 * scsi_eh_ready_devs - check device ready state and recover if not.
1410 * @shost: host to be recovered.
1411  * @done_q: list_head for processed commands.
1412 *
1413 **/
1414 static void scsi_eh_ready_devs(struct Scsi_Host *shost,
1415 struct list_head *work_q,
1416 struct list_head *done_q)
1417 {
1418 if (!scsi_eh_stu(shost, work_q, done_q))
1419 if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
1420 if (!scsi_eh_bus_reset(shost, work_q, done_q))
1421 if (!scsi_eh_host_reset(work_q, done_q))
1422 scsi_eh_offline_sdevs(work_q, done_q);
1423 }
1424
1425 /**
1426 * scsi_eh_flush_done_q - finish processed commands or retry them.
1427 * @done_q: list_head of processed commands.
1428 *
1429 **/
1430 void scsi_eh_flush_done_q(struct list_head *done_q)
1431 {
1432 struct scsi_cmnd *scmd, *next;
1433
1434 list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
1435 list_del_init(&scmd->eh_entry);
1436 if (scsi_device_online(scmd->device) &&
1437 !blk_noretry_request(scmd->request) &&
1438 (++scmd->retries <= scmd->allowed)) {
1439 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
1440 " retry cmd: %p\n",
1441 current->comm,
1442 scmd));
1443 scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
1444 } else {
1445 /*
1446  * If we just got sense for the device (i.e.
1447  * scsi_eh_get_sense was called), scmd->result is
1448  * already set; do not set DRIVER_TIMEOUT.
1449 */
1450 if (!scmd->result)
1451 scmd->result |= (DRIVER_TIMEOUT << 24);
1452 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
1453 " cmd: %p\n",
1454 current->comm, scmd));
1455 scsi_finish_command(scmd);
1456 }
1457 }
1458 }
1459 EXPORT_SYMBOL(scsi_eh_flush_done_q);
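
/*
 * Editorial example, not part of the original file: the skeleton of a
 * transport eh_strategy_handler() built from the two exported helpers
 * above, mirroring what scsi_unjam_host() does below.  The void
 * signature and foo_recover_cmd() are assumptions for illustration; a
 * real handler would decide per command whether recovery worked before
 * finishing it.  Excluded from the build.
 */
#if 0
static void foo_eh_strategy_handler(struct Scsi_Host *shost)
{
	struct scsi_cmnd *scmd, *next;
	unsigned long flags;
	LIST_HEAD(work_q);
	LIST_HEAD(done_q);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	list_for_each_entry_safe(scmd, next, &work_q, eh_entry) {
		foo_recover_cmd(scmd);			/* transport-specific recovery */
		scsi_eh_finish_cmd(scmd, &done_q);	/* drops host_failed, moves to done_q */
	}

	/* retry or complete everything finished above */
	scsi_eh_flush_done_q(&done_q);
}
#endif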
1460
1461 /**
1462 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
1463 * @shost: Host to unjam.
1464 *
1465 * Notes:
1466 * When we come in here, we *know* that all commands on the bus have
1467 * either completed, failed or timed out. we also know that no further
1468 * commands are being sent to the host, so things are relatively quiet
1469 * and we have freedom to fiddle with things as we wish.
1470 *
1471 * This is only the *default* implementation. it is possible for
1472 * individual drivers to supply their own version of this function, and
1473 * if the maintainer wishes to do this, it is strongly suggested that
1474 * this function be taken as a template and modified. this function
1475 * was designed to correctly handle problems for about 95% of the
1476 * different cases out there, and it should always provide at least a
1477 * reasonable amount of error recovery.
1478 *
1479 * Any command marked 'failed' or 'timeout' must eventually have
1480  * scsi_finish_command() called for it. we do all of the retry stuff
1481 * here, so when we restart the host after we return it should have an
1482 * empty queue.
1483 **/
1484 static void scsi_unjam_host(struct Scsi_Host *shost)
1485 {
1486 unsigned long flags;
1487 LIST_HEAD(eh_work_q);
1488 LIST_HEAD(eh_done_q);
1489
1490 spin_lock_irqsave(shost->host_lock, flags);
1491 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
1492 spin_unlock_irqrestore(shost->host_lock, flags);
1493
1494 SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
1495
1496 if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
1497 if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
1498 scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
1499
1500 scsi_eh_flush_done_q(&eh_done_q);
1501 }
1502
1503 /**
1504 * scsi_error_handler - SCSI error handler thread
1505 * @data: Host for which we are running.
1506 *
1507 * Notes:
1508 * This is the main error handling loop. This is run as a kernel thread
1509 * for every SCSI host and handles all error handling activity.
1510 **/
1511 int scsi_error_handler(void *data)
1512 {
1513 struct Scsi_Host *shost = data;
1514
1515 current->flags |= PF_NOFREEZE;
1516
1517 /*
1518 * We use TASK_INTERRUPTIBLE so that the thread is not
1519 * counted against the load average as a running process.
1520 * We never actually get interrupted because kthread_run
1521  * disables signal delivery for the created thread.
1522 */
1523 set_current_state(TASK_INTERRUPTIBLE);
1524 while (!kthread_should_stop()) {
1525 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
1526 shost->host_failed != shost->host_busy) {
1527 SCSI_LOG_ERROR_RECOVERY(1,
1528 printk("Error handler scsi_eh_%d sleeping\n",
1529 shost->host_no));
1530 schedule();
1531 set_current_state(TASK_INTERRUPTIBLE);
1532 continue;
1533 }
1534
1535 __set_current_state(TASK_RUNNING);
1536 SCSI_LOG_ERROR_RECOVERY(1,
1537 printk("Error handler scsi_eh_%d waking up\n",
1538 shost->host_no));
1539
1540 /*
1541 * We have a host that is failing for some reason. Figure out
1542 * what we need to do to get it up and online again (if we can).
1543 * If we fail, we end up taking the thing offline.
1544 */
1545 if (shost->transportt->eh_strategy_handler)
1546 shost->transportt->eh_strategy_handler(shost);
1547 else
1548 scsi_unjam_host(shost);
1549
1550 /*
1551 * Note - if the above fails completely, the action is to take
1552 * individual devices offline and flush the queue of any
1553 * outstanding requests that may have been pending. When we
1554 * restart, we restart any I/O to any other devices on the bus
1555 * which are still online.
1556 */
1557 scsi_restart_operations(shost);
1558 set_current_state(TASK_INTERRUPTIBLE);
1559 }
1560 __set_current_state(TASK_RUNNING);
1561
1562 SCSI_LOG_ERROR_RECOVERY(1,
1563 printk("Error handler scsi_eh_%d exiting\n", shost->host_no));
1564 shost->ehandler = NULL;
1565 return 0;
1566 }
1567
1568 /*
1569 * Function: scsi_report_bus_reset()
1570 *
1571 * Purpose: Utility function used by low-level drivers to report that
1572 * they have observed a bus reset on the bus being handled.
1573 *
1574 * Arguments: shost - Host in question
1575 * channel - channel on which reset was observed.
1576 *
1577 * Returns: Nothing
1578 *
1579 * Lock status: Host lock must be held.
1580 *
1581 * Notes: This only needs to be called if the reset is one which
1582 * originates from an unknown location. Resets originated
1583 * by the mid-level itself don't need to call this, but there
1584 * should be no harm.
1585 *
1586 * The main purpose of this is to make sure that a CHECK_CONDITION
1587 * is properly treated.
1588 */
1589 void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
1590 {
1591 struct scsi_device *sdev;
1592
1593 __shost_for_each_device(sdev, shost) {
1594 if (channel == sdev_channel(sdev)) {
1595 sdev->was_reset = 1;
1596 sdev->expecting_cc_ua = 1;
1597 }
1598 }
1599 }
1600 EXPORT_SYMBOL(scsi_report_bus_reset);
1601
1602 /*
1603 * Function: scsi_report_device_reset()
1604 *
1605 * Purpose: Utility function used by low-level drivers to report that
1606 * they have observed a device reset on the device being handled.
1607 *
1608 * Arguments: shost - Host in question
1609 * channel - channel on which reset was observed
1610 * target - target on which reset was observed
1611 *
1612 * Returns: Nothing
1613 *
1614 * Lock status: Host lock must be held
1615 *
1616 * Notes: This only needs to be called if the reset is one which
1617 * originates from an unknown location. Resets originated
1618 * by the mid-level itself don't need to call this, but there
1619 * should be no harm.
1620 *
1621 * The main purpose of this is to make sure that a CHECK_CONDITION
1622 * is properly treated.
1623 */
1624 void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
1625 {
1626 struct scsi_device *sdev;
1627
1628 __shost_for_each_device(sdev, shost) {
1629 if (channel == sdev_channel(sdev) &&
1630 target == sdev_id(sdev)) {
1631 sdev->was_reset = 1;
1632 sdev->expecting_cc_ua = 1;
1633 }
1634 }
1635 }
1636 EXPORT_SYMBOL(scsi_report_device_reset);
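
/*
 * Editorial example, not part of the original file: how an LLDD that
 * observes a target-initiated reset might report it.  Both report
 * helpers above expect the host lock to be held; foo_saw_device_reset()
 * is invented for illustration and excluded from the build.
 */
#if 0
static void foo_saw_device_reset(struct Scsi_Host *shost, int channel, int id)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_report_device_reset(shost, channel, id);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
#endif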
1637
1638 static void
1639 scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
1640 {
1641 }
1642
1643 /*
1644 * Function: scsi_reset_provider
1645 *
1646 * Purpose: Send requested reset to a bus or device at any phase.
1647 *
1648 * Arguments: device - device to send reset to
1649 * flag - reset type (see scsi.h)
1650 *
1651 * Returns: SUCCESS/FAILURE.
1652 *
1653 * Notes: This is used by the SCSI Generic driver to provide
1654 * Bus/Device reset capability.
1655 */
1656 int
1657 scsi_reset_provider(struct scsi_device *dev, int flag)
1658 {
1659 struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
1660 struct Scsi_Host *shost = dev->host;
1661 struct request req;
1662 unsigned long flags;
1663 int rtn;
1664
1665 scmd->request = &req;
1666 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
1667
1668 memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd));
1669
1670 scmd->scsi_done = scsi_reset_provider_done_command;
1671 scmd->done = NULL;
1672 scmd->request_buffer = NULL;
1673 scmd->request_bufflen = 0;
1674
1675 scmd->cmd_len = 0;
1676
1677 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
1678
1679 init_timer(&scmd->eh_timeout);
1680
1681 /*
1682 * Sometimes the command can get back into the timer chain,
1683 * so use the pid as an identifier.
1684 */
1685 scmd->pid = 0;
1686
1687 spin_lock_irqsave(shost->host_lock, flags);
1688 shost->tmf_in_progress = 1;
1689 spin_unlock_irqrestore(shost->host_lock, flags);
1690
1691 switch (flag) {
1692 case SCSI_TRY_RESET_DEVICE:
1693 rtn = scsi_try_bus_device_reset(scmd);
1694 if (rtn == SUCCESS)
1695 break;
1696 /* FALLTHROUGH */
1697 case SCSI_TRY_RESET_BUS:
1698 rtn = scsi_try_bus_reset(scmd);
1699 if (rtn == SUCCESS)
1700 break;
1701 /* FALLTHROUGH */
1702 case SCSI_TRY_RESET_HOST:
1703 rtn = scsi_try_host_reset(scmd);
1704 break;
1705 default:
1706 rtn = FAILED;
1707 }
1708
1709 spin_lock_irqsave(shost->host_lock, flags);
1710 shost->tmf_in_progress = 0;
1711 spin_unlock_irqrestore(shost->host_lock, flags);
1712
1713 /*
1714 * be sure to wake up anyone who was sleeping or had their queue
1715 * suspended while we performed the TMF.
1716 */
1717 SCSI_LOG_ERROR_RECOVERY(3,
1718 printk("%s: waking up host to restart after TMF\n",
1719 __FUNCTION__));
1720
1721 wake_up(&shost->host_wait);
1722
1723 scsi_run_host_queues(shost);
1724
1725 scsi_next_command(scmd);
1726 return rtn;
1727 }
1728 EXPORT_SYMBOL(scsi_reset_provider);
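
/*
 * Editorial example, not part of the original file: the pattern a
 * pass-through driver such as sg uses to turn a user-requested reset
 * into a scsi_reset_provider() call, escalating from device to bus to
 * host exactly as the switch above does.  foo_user_reset() is invented
 * for illustration and excluded from the build.
 */
#if 0
static int foo_user_reset(struct scsi_device *sdev, int level)
{
	int flag;

	switch (level) {
	case 0:
		flag = SCSI_TRY_RESET_DEVICE;
		break;
	case 1:
		flag = SCSI_TRY_RESET_BUS;
		break;
	default:
		flag = SCSI_TRY_RESET_HOST;
		break;
	}

	return (scsi_reset_provider(sdev, flag) == SUCCESS) ? 0 : -EIO;
}
#endif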
1729
1730 /**
1731 * scsi_normalize_sense - normalize main elements from either fixed or
1732 * descriptor sense data format into a common format.
1733 *
1734 * @sense_buffer: byte array containing sense data returned by device
1735 * @sb_len: number of valid bytes in sense_buffer
1736 * @sshdr: pointer to instance of structure that common
1737 * elements are written to.
1738 *
1739 * Notes:
1740 * The "main elements" from sense data are: response_code, sense_key,
1741 * asc, ascq and additional_length (only for descriptor format).
1742 *
1743 * Typically this function can be called after a device has
1744 * responded to a SCSI command with the CHECK_CONDITION status.
1745 *
1746 * Return value:
1747 * 1 if valid sense data information found, else 0;
1748 **/
1749 int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
1750 struct scsi_sense_hdr *sshdr)
1751 {
1752 if (!sense_buffer || !sb_len)
1753 return 0;
1754
1755 memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
1756
1757 sshdr->response_code = (sense_buffer[0] & 0x7f);
1758
1759 if (!scsi_sense_valid(sshdr))
1760 return 0;
1761
1762 if (sshdr->response_code >= 0x72) {
1763 /*
1764 * descriptor format
1765 */
1766 if (sb_len > 1)
1767 sshdr->sense_key = (sense_buffer[1] & 0xf);
1768 if (sb_len > 2)
1769 sshdr->asc = sense_buffer[2];
1770 if (sb_len > 3)
1771 sshdr->ascq = sense_buffer[3];
1772 if (sb_len > 7)
1773 sshdr->additional_length = sense_buffer[7];
1774 } else {
1775 /*
1776 * fixed format
1777 */
1778 if (sb_len > 2)
1779 sshdr->sense_key = (sense_buffer[2] & 0xf);
1780 if (sb_len > 7) {
1781 sb_len = (sb_len < (sense_buffer[7] + 8)) ?
1782 sb_len : (sense_buffer[7] + 8);
1783 if (sb_len > 12)
1784 sshdr->asc = sense_buffer[12];
1785 if (sb_len > 13)
1786 sshdr->ascq = sense_buffer[13];
1787 }
1788 }
1789
1790 return 1;
1791 }
1792 EXPORT_SYMBOL(scsi_normalize_sense);
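
/*
 * Editorial example, not part of the original file: decoding a raw sense
 * buffer with scsi_normalize_sense().  The bytes below are a typical
 * fixed-format (response code 0x70) "power on or reset" unit attention:
 * sense key 0x6, ASC 0x29, ASCQ 0x00.  Excluded from the build.
 */
#if 0
static void foo_decode_sense_example(void)
{
	static const u8 sense[18] = {
		0x70, 0x00, UNIT_ATTENTION, 0x00, 0x00, 0x00, 0x00, 0x0a,
		0x00, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	struct scsi_sense_hdr sshdr;

	if (scsi_normalize_sense(sense, sizeof(sense), &sshdr))
		printk(KERN_INFO "key %x asc %x ascq %x\n",
		       sshdr.sense_key, sshdr.asc, sshdr.ascq);
}
#endif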
1793
1794 int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
1795 struct scsi_sense_hdr *sshdr)
1796 {
1797 return scsi_normalize_sense(cmd->sense_buffer,
1798 sizeof(cmd->sense_buffer), sshdr);
1799 }
1800 EXPORT_SYMBOL(scsi_command_normalize_sense);
1801
1802 /**
1803 * scsi_sense_desc_find - search for a given descriptor type in
1804 * descriptor sense data format.
1805 *
1806 * @sense_buffer: byte array of descriptor format sense data
1807 * @sb_len: number of valid bytes in sense_buffer
1808 * @desc_type: value of descriptor type to find
1809 * (e.g. 0 -> information)
1810 *
1811 * Notes:
1812 * only valid when sense data is in descriptor format
1813 *
1814 * Return value:
1815 * pointer to start of (first) descriptor if found else NULL
1816 **/
1817 const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
1818 int desc_type)
1819 {
1820 int add_sen_len, add_len, desc_len, k;
1821 const u8 * descp;
1822
1823 if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
1824 return NULL;
1825 if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
1826 return NULL;
1827 add_sen_len = (add_sen_len < (sb_len - 8)) ?
1828 add_sen_len : (sb_len - 8);
1829 descp = &sense_buffer[8];
1830 for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
1831 descp += desc_len;
1832 add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
1833 desc_len = add_len + 2;
1834 if (descp[0] == desc_type)
1835 return descp;
1836 if (add_len < 0) // short descriptor ??
1837 break;
1838 }
1839 return NULL;
1840 }
1841 EXPORT_SYMBOL(scsi_sense_desc_find);
1842
1843 /**
1844 * scsi_get_sense_info_fld - attempts to get information field from
1845 * sense data (either fixed or descriptor format)
1846 *
1847 * @sense_buffer: byte array of sense data
1848 * @sb_len: number of valid bytes in sense_buffer
1849  * @info_out: pointer to a 64-bit integer where the 8- or 4-byte
1850  * information field will be placed if found.
1851 *
1852 * Return value:
1853 * 1 if information field found, 0 if not found.
1854 **/
1855 int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
1856 u64 * info_out)
1857 {
1858 int j;
1859 const u8 * ucp;
1860 u64 ull;
1861
1862 if (sb_len < 7)
1863 return 0;
1864 switch (sense_buffer[0] & 0x7f) {
1865 case 0x70:
1866 case 0x71:
1867 if (sense_buffer[0] & 0x80) {
1868 *info_out = (sense_buffer[3] << 24) +
1869 (sense_buffer[4] << 16) +
1870 (sense_buffer[5] << 8) + sense_buffer[6];
1871 return 1;
1872 } else
1873 return 0;
1874 case 0x72:
1875 case 0x73:
1876 ucp = scsi_sense_desc_find(sense_buffer, sb_len,
1877 0 /* info desc */);
1878 if (ucp && (0xa == ucp[1])) {
1879 ull = 0;
1880 for (j = 0; j < 8; ++j) {
1881 if (j > 0)
1882 ull <<= 8;
1883 ull |= ucp[4 + j];
1884 }
1885 *info_out = ull;
1886 return 1;
1887 } else
1888 return 0;
1889 default:
1890 return 0;
1891 }
1892 }
1893 EXPORT_SYMBOL(scsi_get_sense_info_fld);
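
/*
 * Editorial example, not part of the original file: pulling the
 * information field (for a MEDIUM_ERROR this is normally the failing
 * LBA) out of the sense data a command came back with.  Excluded from
 * the build; foo_report_bad_lba() is invented for illustration.
 */
#if 0
static void foo_report_bad_lba(struct scsi_cmnd *scmd)
{
	struct scsi_sense_hdr sshdr;
	u64 bad_lba;

	if (!scsi_command_normalize_sense(scmd, &sshdr))
		return;
	if (sshdr.sense_key != MEDIUM_ERROR)
		return;
	if (scsi_get_sense_info_fld(scmd->sense_buffer,
				    sizeof(scmd->sense_buffer), &bad_lba))
		sdev_printk(KERN_WARNING, scmd->device,
			    "medium error at LBA %llu\n",
			    (unsigned long long)bad_lba);
}
#endif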