]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * scsi_error.c Copyright (C) 1997 Eric Youngdale | |
3 | * | |
4 | * SCSI error/timeout handling | |
5 | * Initial versions: Eric Youngdale. Based upon conversations with | |
6 | * Leonard Zubkoff and David Miller at Linux Expo, | |
7 | * ideas originating from all over the place. | |
8 | * | |
9 | * Restructured scsi_unjam_host and associated functions. | |
10 | * September 04, 2002 Mike Anderson (andmike@us.ibm.com) | |
11 | * | |
12 | * Forward port of Russell King's (rmk@arm.linux.org.uk) changes and | |
13 | * minor cleanups. | |
14 | * September 30, 2002 Mike Anderson (andmike@us.ibm.com) | |
15 | */ | |
16 | ||
17 | #include <linux/module.h> | |
18 | #include <linux/sched.h> | |
19 | #include <linux/timer.h> | |
20 | #include <linux/string.h> | |
21 | #include <linux/slab.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/interrupt.h> | |
24 | #include <linux/blkdev.h> | |
25 | #include <linux/delay.h> | |
26 | ||
27 | #include <scsi/scsi.h> | |
28 | #include <scsi/scsi_dbg.h> | |
29 | #include <scsi/scsi_device.h> | |
30 | #include <scsi/scsi_eh.h> | |
31 | #include <scsi/scsi_host.h> | |
32 | #include <scsi/scsi_ioctl.h> | |
33 | #include <scsi/scsi_request.h> | |
34 | ||
35 | #include "scsi_priv.h" | |
36 | #include "scsi_logging.h" | |
37 | ||
38 | #define SENSE_TIMEOUT (10*HZ) | |
39 | #define START_UNIT_TIMEOUT (30*HZ) | |
40 | ||
41 | /* | |
42 | * These should *probably* be handled by the host itself. | |
43 | * Since it is allowed to sleep, it probably should. | |
44 | */ | |
45 | #define BUS_RESET_SETTLE_TIME (10) | |
46 | #define HOST_RESET_SETTLE_TIME (10) | |
47 | ||
48 | /* called with shost->host_lock held */ | |
49 | void scsi_eh_wakeup(struct Scsi_Host *shost) | |
50 | { | |
51 | if (shost->host_busy == shost->host_failed) { | |
52 | up(shost->eh_wait); | |
53 | SCSI_LOG_ERROR_RECOVERY(5, | |
54 | printk("Waking error handler thread\n")); | |
55 | } | |
56 | } | |
57 | ||
/**
 * scsi_eh_scmd_add - add scsi cmd to error handling.
 * @scmd:	scmd to run eh on.
 * @eh_flag:	optional SCSI_EH flag.
 *
 * Return value:
 *	0 on failure (no eh thread registered for the host), 1 on success.
 **/
int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
	struct Scsi_Host *shost = scmd->device->host;
	unsigned long flags;

	/* no error handler thread to hand this command to */
	if (shost->eh_wait == NULL)
		return 0;

	spin_lock_irqsave(shost->host_lock, flags);

	scsi_eh_eflags_set(scmd, eh_flag);
	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
	set_bit(SHOST_RECOVERY, &shost->shost_state);
	shost->host_failed++;
	/* wakes the eh thread once host_busy == host_failed */
	scsi_eh_wakeup(shost);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return 1;
}
84 | ||
85 | /** | |
86 | * scsi_add_timer - Start timeout timer for a single scsi command. | |
87 | * @scmd: scsi command that is about to start running. | |
88 | * @timeout: amount of time to allow this command to run. | |
89 | * @complete: timeout function to call if timer isn't canceled. | |
90 | * | |
91 | * Notes: | |
92 | * This should be turned into an inline function. Each scsi command | |
93 | * has its own timer, and as it is added to the queue, we set up the | |
94 | * timer. When the command completes, we cancel the timer. | |
95 | **/ | |
96 | void scsi_add_timer(struct scsi_cmnd *scmd, int timeout, | |
97 | void (*complete)(struct scsi_cmnd *)) | |
98 | { | |
99 | ||
100 | /* | |
101 | * If the clock was already running for this command, then | |
102 | * first delete the timer. The timer handling code gets rather | |
103 | * confused if we don't do this. | |
104 | */ | |
105 | if (scmd->eh_timeout.function) | |
106 | del_timer(&scmd->eh_timeout); | |
107 | ||
108 | scmd->eh_timeout.data = (unsigned long)scmd; | |
109 | scmd->eh_timeout.expires = jiffies + timeout; | |
110 | scmd->eh_timeout.function = (void (*)(unsigned long)) complete; | |
111 | ||
112 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:" | |
113 | " %d, (%p)\n", __FUNCTION__, | |
114 | scmd, timeout, complete)); | |
115 | ||
116 | add_timer(&scmd->eh_timeout); | |
117 | } | |
118 | EXPORT_SYMBOL(scsi_add_timer); | |
119 | ||
120 | /** | |
121 | * scsi_delete_timer - Delete/cancel timer for a given function. | |
122 | * @scmd: Cmd that we are canceling timer for | |
123 | * | |
124 | * Notes: | |
125 | * This should be turned into an inline function. | |
126 | * | |
127 | * Return value: | |
128 | * 1 if we were able to detach the timer. 0 if we blew it, and the | |
129 | * timer function has already started to run. | |
130 | **/ | |
131 | int scsi_delete_timer(struct scsi_cmnd *scmd) | |
132 | { | |
133 | int rtn; | |
134 | ||
135 | rtn = del_timer(&scmd->eh_timeout); | |
136 | ||
137 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p," | |
138 | " rtn: %d\n", __FUNCTION__, | |
139 | scmd, rtn)); | |
140 | ||
141 | scmd->eh_timeout.data = (unsigned long)NULL; | |
142 | scmd->eh_timeout.function = NULL; | |
143 | ||
144 | return rtn; | |
145 | } | |
146 | EXPORT_SYMBOL(scsi_delete_timer); | |
147 | ||
/**
 * scsi_times_out - Timeout function for normal scsi commands.
 * @scmd:	Cmd that is timing out.
 *
 * Notes:
 *	We do not need to lock this.  There is the potential for a race
 *	only in that the normal completion handling might run, but if the
 *	normal completion function determines that the timer has already
 *	fired, then it mustn't do anything.
 **/
void scsi_times_out(struct scsi_cmnd *scmd)
{
	scsi_log_completion(scmd, TIMEOUT_ERROR);

	/* give the LLDD a first shot at the timeout, if it provides a hook */
	if (scmd->device->host->hostt->eh_timed_out)
		switch (scmd->device->host->hostt->eh_timed_out(scmd)) {
		case EH_HANDLED:
			/* LLDD completed the command itself */
			__scsi_done(scmd);
			return;
		case EH_RESET_TIMER:
			/* This allows a single retry even of a command
			 * with allowed == 0 */
			if (scmd->retries++ > scmd->allowed)
				break;
			scsi_add_timer(scmd, scmd->timeout_per_command,
				       scsi_times_out);
			return;
		case EH_NOT_HANDLED:
			break;
		}

	/* hand the command to the generic error-recovery thread */
	if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
		panic("Error handler thread not present at %p %p %s %d",
		      scmd, scmd->device->host, __FILE__, __LINE__);
	}
}
184 | ||
185 | /** | |
186 | * scsi_block_when_processing_errors - Prevent cmds from being queued. | |
187 | * @sdev: Device on which we are performing recovery. | |
188 | * | |
189 | * Description: | |
190 | * We block until the host is out of error recovery, and then check to | |
191 | * see whether the host or the device is offline. | |
192 | * | |
193 | * Return value: | |
194 | * 0 when dev was taken offline by error recovery. 1 OK to proceed. | |
195 | **/ | |
196 | int scsi_block_when_processing_errors(struct scsi_device *sdev) | |
197 | { | |
198 | int online; | |
199 | ||
200 | wait_event(sdev->host->host_wait, (!test_bit(SHOST_RECOVERY, &sdev->host->shost_state))); | |
201 | ||
202 | online = scsi_device_online(sdev); | |
203 | ||
204 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__, | |
205 | online)); | |
206 | ||
207 | return online; | |
208 | } | |
209 | EXPORT_SYMBOL(scsi_block_when_processing_errors); | |
210 | ||
211 | #ifdef CONFIG_SCSI_LOGGING | |
212 | /** | |
213 | * scsi_eh_prt_fail_stats - Log info on failures. | |
214 | * @shost: scsi host being recovered. | |
215 | * @work_q: Queue of scsi cmds to process. | |
216 | **/ | |
217 | static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, | |
218 | struct list_head *work_q) | |
219 | { | |
220 | struct scsi_cmnd *scmd; | |
221 | struct scsi_device *sdev; | |
222 | int total_failures = 0; | |
223 | int cmd_failed = 0; | |
224 | int cmd_cancel = 0; | |
225 | int devices_failed = 0; | |
226 | ||
227 | shost_for_each_device(sdev, shost) { | |
228 | list_for_each_entry(scmd, work_q, eh_entry) { | |
229 | if (scmd->device == sdev) { | |
230 | ++total_failures; | |
231 | if (scsi_eh_eflags_chk(scmd, | |
232 | SCSI_EH_CANCEL_CMD)) | |
233 | ++cmd_cancel; | |
234 | else | |
235 | ++cmd_failed; | |
236 | } | |
237 | } | |
238 | ||
239 | if (cmd_cancel || cmd_failed) { | |
240 | SCSI_LOG_ERROR_RECOVERY(3, | |
241 | printk("%s: %d:%d:%d:%d cmds failed: %d," | |
242 | " cancel: %d\n", | |
243 | __FUNCTION__, shost->host_no, | |
244 | sdev->channel, sdev->id, sdev->lun, | |
245 | cmd_failed, cmd_cancel)); | |
246 | cmd_cancel = 0; | |
247 | cmd_failed = 0; | |
248 | ++devices_failed; | |
249 | } | |
250 | } | |
251 | ||
252 | SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d" | |
253 | " devices require eh work\n", | |
254 | total_failures, devices_failed)); | |
255 | } | |
256 | #endif | |
257 | ||
/**
 * scsi_check_sense - Examine scsi cmd sense
 * @scmd:	Cmd to have sense checked.
 *
 * Return value:
 * 	SUCCESS or FAILED or NEEDS_RETRY
 *
 * Notes:
 *	When a deferred error is detected the current command has
 *	not been executed and needs retrying.
 **/
static int scsi_check_sense(struct scsi_cmnd *scmd)
{
	struct scsi_sense_hdr sshdr;

	if (! scsi_command_normalize_sense(scmd, &sshdr))
		return FAILED;	/* no valid sense data */

	if (scsi_sense_is_deferred(&sshdr))
		return NEEDS_RETRY;

	/*
	 * Previous logic looked for FILEMARK, EOM or ILI which are
	 * mainly associated with tapes and returned SUCCESS.
	 */
	if (sshdr.response_code == 0x70) {
		/* fixed format: FILEMARK/EOM/ILI live in byte 2, bits 5-7 */
		if (scmd->sense_buffer[2] & 0xe0)
			return SUCCESS;
	} else {
		/*
		 * descriptor format: look for "stream commands sense data
		 * descriptor" (see SSC-3). Assume single sense data
		 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
		 */
		if ((sshdr.additional_length > 3) &&
		    (scmd->sense_buffer[8] == 0x4) &&
		    (scmd->sense_buffer[11] & 0xe0))
			return SUCCESS;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
		return SUCCESS;
	case RECOVERED_ERROR:
		return /* soft_error */ SUCCESS;

	case ABORTED_COMMAND:
		return NEEDS_RETRY;
	case NOT_READY:
	case UNIT_ATTENTION:
		/*
		 * if we are expecting a cc/ua because of a bus reset that we
		 * performed, treat this just as a retry.  otherwise this is
		 * information that we should pass up to the upper-level driver
		 * so that we can deal with it there.
		 */
		if (scmd->device->expecting_cc_ua) {
			scmd->device->expecting_cc_ua = 0;
			return NEEDS_RETRY;
		}
		/*
		 * if the device is in the process of becoming ready, we
		 * should retry.  (asc/ascq 04/01: "in process of becoming
		 * ready")
		 */
		if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
			return NEEDS_RETRY;
		/*
		 * if the device is not started, we need to wake
		 * the error handler to start the motor
		 * (asc/ascq 04/02: "initializing command required")
		 */
		if (scmd->device->allow_restart &&
		    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
			return FAILED;
		return SUCCESS;

		/* these three are not supported */
	case COPY_ABORTED:
	case VOLUME_OVERFLOW:
	case MISCOMPARE:
		return SUCCESS;

	case MEDIUM_ERROR:
		return NEEDS_RETRY;

	case HARDWARE_ERROR:
		if (scmd->device->retry_hwerror)
			return NEEDS_RETRY;
		else
			return SUCCESS;

	case ILLEGAL_REQUEST:
	case BLANK_CHECK:
	case DATA_PROTECT:
	default:
		return SUCCESS;
	}
}
356 | ||
/**
 * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
 * @scmd:	SCSI cmd to examine.
 *
 * Notes:
 *	This is *only* called when we are examining the status of commands
 *	queued during error recovery.  the main difference here is that we
 *	don't allow for the possibility of retries here, and we are a lot
 *	more restrictive about what we consider acceptable.
 **/
static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	if (host_byte(scmd->result) == DID_RESET) {
		/*
		 * rats.  we are already in the error handler, so we now
		 * get to try and figure out what to do next.  if the sense
		 * is valid, we have a pretty good idea of what to do.
		 * if not, we mark it as FAILED.
		 */
		return scsi_check_sense(scmd);
	}
	if (host_byte(scmd->result) != DID_OK)
		return FAILED;

	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * now, check the status byte to see if this indicates
	 * anything special.
	 */
	switch (status_byte(scmd->result)) {
	case GOOD:
	case COMMAND_TERMINATED:
		return SUCCESS;
	case CHECK_CONDITION:
		return scsi_check_sense(scmd);
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
		/*
		 * who knows?  FIXME(eric)
		 */
		return SUCCESS;
	case BUSY:
	case QUEUE_FULL:
	case RESERVATION_CONFLICT:
	default:
		return FAILED;
	}
	/* not reached: every switch path above returns */
	return FAILED;
}
416 | ||
417 | /** | |
418 | * scsi_eh_times_out - timeout function for error handling. | |
419 | * @scmd: Cmd that is timing out. | |
420 | * | |
421 | * Notes: | |
422 | * During error handling, the kernel thread will be sleeping waiting | |
423 | * for some action to complete on the device. our only job is to | |
424 | * record that it timed out, and to wake up the thread. | |
425 | **/ | |
426 | static void scsi_eh_times_out(struct scsi_cmnd *scmd) | |
427 | { | |
428 | scsi_eh_eflags_set(scmd, SCSI_EH_REC_TIMEOUT); | |
429 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd:%p\n", __FUNCTION__, | |
430 | scmd)); | |
431 | ||
5b8ef842 | 432 | up(scmd->device->host->eh_action); |
1da177e4 LT |
433 | } |
434 | ||
/**
 * scsi_eh_done - Completion function for error handling.
 * @scmd:	Cmd that is done.
 **/
static void scsi_eh_done(struct scsi_cmnd *scmd)
{
	/*
	 * if the timeout handler is already running, then just set the
	 * flag which says we finished late, and return.  we have no
	 * way of stopping the timeout handler from running, so we must
	 * always defer to it.
	 */
	if (del_timer(&scmd->eh_timeout)) {
		scmd->request->rq_status = RQ_SCSI_DONE;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s scmd: %p result: %x\n",
					   __FUNCTION__, scmd, scmd->result));

		/* wake the eh thread sleeping in scsi_send_eh_cmnd() */
		up(scmd->device->host->eh_action);
	}
}
456 | ||
/**
 * scsi_send_eh_cmnd - send a cmd to a device as part of error recovery.
 * @scmd:	SCSI Cmd to send.
 * @timeout:	Timeout for cmd.
 *
 * Notes:
 *	The initialization of the structures is quite a bit different in
 *	this case, and furthermore, there is a different completion handler
 *	vs scsi_dispatch_cmd.
 * Return value:
 *	SUCCESS or FAILED or NEEDS_RETRY
 **/
static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	DECLARE_MUTEX_LOCKED(sem);
	unsigned long flags;
	int rtn = SUCCESS;

	/*
	 * we will use a queued command if possible, otherwise we will
	 * emulate the queuing and calling of completion function ourselves.
	 */
	/* SCSI-2 and older targets expect the LUN in bits 5-7 of CDB byte 1 */
	if (sdev->scsi_level <= SCSI_2)
		scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
			(sdev->lun << 5 & 0xe0);

	scsi_add_timer(scmd, timeout, scsi_eh_times_out);

	/*
	 * set up the semaphore so we wait for the command to complete.
	 * scsi_eh_done()/scsi_eh_times_out() will up() it.
	 */
	shost->eh_action = &sem;
	scmd->request->rq_status = RQ_SCSI_BUSY;

	/* queuecommand is called with the host lock held */
	spin_lock_irqsave(shost->host_lock, flags);
	scsi_log_send(scmd);
	shost->hostt->queuecommand(scmd, scsi_eh_done);
	spin_unlock_irqrestore(shost->host_lock, flags);

	down(&sem);
	scsi_log_completion(scmd, SUCCESS);

	shost->eh_action = NULL;

	/*
	 * see if timeout.  if so, tell the host to forget about it.
	 * in other words, we don't want a callback any more.
	 */
	if (scsi_eh_eflags_chk(scmd, SCSI_EH_REC_TIMEOUT)) {
		scsi_eh_eflags_clr(scmd, SCSI_EH_REC_TIMEOUT);

		/*
		 * as far as the low level driver is
		 * concerned, this command is still active, so
		 * we must give the low level driver a chance
		 * to abort it. (db)
		 *
		 * FIXME(eric) - we are not tracking whether we could
		 * abort a timed out command or not.  not sure how
		 * we should treat them differently anyways.
		 */
		if (shost->hostt->eh_abort_handler)
			shost->hostt->eh_abort_handler(scmd);

		scmd->request->rq_status = RQ_SCSI_DONE;
		rtn = FAILED;
	}

	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd: %p, rtn:%x\n",
					  __FUNCTION__, scmd, rtn));

	/*
	 * now examine the actual status codes to see whether the command
	 * actually did complete normally.
	 */
	if (rtn == SUCCESS) {
		rtn = scsi_eh_completed_normally(scmd);
		SCSI_LOG_ERROR_RECOVERY(3,
			printk("%s: scsi_eh_completed_normally %x\n",
			       __FUNCTION__, rtn));
		switch (rtn) {
		case SUCCESS:
		case NEEDS_RETRY:
		case FAILED:
			break;
		default:
			/* clamp any unexpected disposition to FAILED */
			rtn = FAILED;
			break;
		}
	}

	return rtn;
}
552 | ||
/**
 * scsi_request_sense - Request sense data from a particular target.
 * @scmd:	SCSI cmd for request sense.
 *
 * Notes:
 *	Some hosts automatically obtain this information, others require
 *	that we obtain it on our own. This function will *not* return until
 *	the command either times out, or it completes.
 **/
static int scsi_request_sense(struct scsi_cmnd *scmd)
{
	/* REQUEST SENSE CDB asking for up to 252 bytes of sense data */
	static unsigned char generic_sense[6] =
	{REQUEST_SENSE, 0, 0, 0, 252, 0};
	unsigned char *scsi_result;
	int saved_result;
	int rtn;

	memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));

	/* ISA-DMA-only hosts need the buffer in the DMA zone */
	scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0));


	if (unlikely(!scsi_result)) {
		printk(KERN_ERR "%s: cannot allocate scsi_result.\n",
		       __FUNCTION__);
		return FAILED;
	}

	/*
	 * zero the sense buffer.  some host adapters automatically always
	 * request sense, so it is not a good idea that
	 * scmd->request_buffer and scmd->sense_buffer point to the same
	 * address (db).  0 is not a valid sense code.
	 */
	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
	memset(scsi_result, 0, 252);

	/* preserve the original result; the eh command will clobber it */
	saved_result = scmd->result;
	scmd->request_buffer = scsi_result;
	scmd->request_bufflen = 252;
	scmd->use_sg = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->sc_data_direction = DMA_FROM_DEVICE;
	scmd->underflow = 0;

	rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);

	/* last chance to have valid sense data */
	if(!SCSI_SENSE_VALID(scmd)) {
		memcpy(scmd->sense_buffer, scmd->request_buffer,
		       sizeof(scmd->sense_buffer));
	}

	kfree(scsi_result);

	/*
	 * when we eventually call scsi_finish, we really wish to complete
	 * the original request, so let's restore the original data. (db)
	 */
	scsi_setup_cmd_retry(scmd);
	scmd->result = saved_result;
	return rtn;
}
616 | ||
/**
 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
 * @scmd:	Original SCSI cmd that eh has finished.
 * @done_q:	Queue for processed commands.
 *
 * Notes:
 *	We don't want to use the normal command completion while we are are
 *	still handling errors - it may cause other commands to be queued,
 *	and that would disturb what we are doing.  thus we really want to
 *	keep a list of pending commands for final completion, and once we
 *	are ready to leave error handling we handle completion for real.
 **/
static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
			       struct list_head *done_q)
{
	/* one fewer outstanding failure on this host */
	scmd->device->host->host_failed--;
	scsi_eh_eflags_clr_all(scmd);

	/*
	 * set this back so that the upper level can correctly free up
	 * things.
	 */
	scsi_setup_cmd_retry(scmd);
	list_move_tail(&scmd->eh_entry, done_q);
}
642 | ||
643 | /** | |
644 | * scsi_eh_get_sense - Get device sense data. | |
645 | * @work_q: Queue of commands to process. | |
646 | * @done_q: Queue of proccessed commands.. | |
647 | * | |
648 | * Description: | |
649 | * See if we need to request sense information. if so, then get it | |
650 | * now, so we have a better idea of what to do. | |
651 | * | |
652 | * Notes: | |
653 | * This has the unfortunate side effect that if a shost adapter does | |
654 | * not automatically request sense information, that we end up shutting | |
655 | * it down before we request it. | |
656 | * | |
657 | * All drivers should request sense information internally these days, | |
658 | * so for now all I have to say is tough noogies if you end up in here. | |
659 | * | |
660 | * XXX: Long term this code should go away, but that needs an audit of | |
661 | * all LLDDs first. | |
662 | **/ | |
663 | static int scsi_eh_get_sense(struct list_head *work_q, | |
664 | struct list_head *done_q) | |
665 | { | |
666 | struct list_head *lh, *lh_sf; | |
667 | struct scsi_cmnd *scmd; | |
668 | int rtn; | |
669 | ||
670 | list_for_each_safe(lh, lh_sf, work_q) { | |
671 | scmd = list_entry(lh, struct scsi_cmnd, eh_entry); | |
672 | if (scsi_eh_eflags_chk(scmd, SCSI_EH_CANCEL_CMD) || | |
673 | SCSI_SENSE_VALID(scmd)) | |
674 | continue; | |
675 | ||
676 | SCSI_LOG_ERROR_RECOVERY(2, printk("%s: requesting sense" | |
677 | " for id: %d\n", | |
678 | current->comm, | |
679 | scmd->device->id)); | |
680 | rtn = scsi_request_sense(scmd); | |
681 | if (rtn != SUCCESS) | |
682 | continue; | |
683 | ||
684 | SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p" | |
685 | " result %x\n", scmd, | |
686 | scmd->result)); | |
687 | SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd)); | |
688 | ||
689 | rtn = scsi_decide_disposition(scmd); | |
690 | ||
691 | /* | |
692 | * if the result was normal, then just pass it along to the | |
693 | * upper level. | |
694 | */ | |
695 | if (rtn == SUCCESS) | |
696 | /* we don't want this command reissued, just | |
697 | * finished with the sense data, so set | |
698 | * retries to the max allowed to ensure it | |
699 | * won't get reissued */ | |
700 | scmd->retries = scmd->allowed; | |
701 | else if (rtn != NEEDS_RETRY) | |
702 | continue; | |
703 | ||
704 | scsi_eh_finish_cmd(scmd, done_q); | |
705 | } | |
706 | ||
707 | return list_empty(work_q); | |
708 | } | |
709 | ||
710 | /** | |
711 | * scsi_try_to_abort_cmd - Ask host to abort a running command. | |
712 | * @scmd: SCSI cmd to abort from Lower Level. | |
713 | * | |
714 | * Notes: | |
715 | * This function will not return until the user's completion function | |
716 | * has been called. there is no timeout on this operation. if the | |
717 | * author of the low-level driver wishes this operation to be timed, | |
718 | * they can provide this facility themselves. helper functions in | |
719 | * scsi_error.c can be supplied to make this easier to do. | |
720 | **/ | |
721 | static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) | |
722 | { | |
1da177e4 | 723 | if (!scmd->device->host->hostt->eh_abort_handler) |
8fa728a2 | 724 | return FAILED; |
1da177e4 LT |
725 | |
726 | /* | |
727 | * scsi_done was called just after the command timed out and before | |
728 | * we had a chance to process it. (db) | |
729 | */ | |
730 | if (scmd->serial_number == 0) | |
731 | return SUCCESS; | |
8fa728a2 | 732 | return scmd->device->host->hostt->eh_abort_handler(scmd); |
1da177e4 LT |
733 | } |
734 | ||
/**
 * scsi_eh_tur - Send TUR to device.
 * @scmd:	Scsi cmd to send TUR
 *
 * Return value:
 *	0 - Device is ready. 1 - Device NOT ready.
 **/
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
	int retry_cnt = 1, rtn;	/* allow a single retry on NEEDS_RETRY */
	int saved_result;

retry_tur:
	memcpy(scmd->cmnd, tur_command, sizeof(tur_command));

	/*
	 * zero the sense buffer.  the scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));

	/* preserve the original result; the eh command will clobber it */
	saved_result = scmd->result;
	scmd->request_buffer = NULL;
	scmd->request_bufflen = 0;
	scmd->use_sg = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->underflow = 0;
	scmd->sc_data_direction = DMA_NONE;

	rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);

	/*
	 * when we eventually call scsi_finish, we really wish to complete
	 * the original request, so let's restore the original data. (db)
	 */
	scsi_setup_cmd_retry(scmd);
	scmd->result = saved_result;

	/*
	 * hey, we are done.  let's look to see what happened.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
		__FUNCTION__, scmd, rtn));
	if (rtn == SUCCESS)
		return 0;
	else if (rtn == NEEDS_RETRY)
		if (retry_cnt--)
			goto retry_tur;
	return 1;
}
786 | ||
787 | /** | |
788 | * scsi_eh_abort_cmds - abort canceled commands. | |
789 | * @shost: scsi host being recovered. | |
790 | * @eh_done_q: list_head for processed commands. | |
791 | * | |
792 | * Decription: | |
793 | * Try and see whether or not it makes sense to try and abort the | |
794 | * running command. this only works out to be the case if we have one | |
795 | * command that has timed out. if the command simply failed, it makes | |
796 | * no sense to try and abort the command, since as far as the shost | |
797 | * adapter is concerned, it isn't running. | |
798 | **/ | |
799 | static int scsi_eh_abort_cmds(struct list_head *work_q, | |
800 | struct list_head *done_q) | |
801 | { | |
802 | struct list_head *lh, *lh_sf; | |
803 | struct scsi_cmnd *scmd; | |
804 | int rtn; | |
805 | ||
806 | list_for_each_safe(lh, lh_sf, work_q) { | |
807 | scmd = list_entry(lh, struct scsi_cmnd, eh_entry); | |
808 | if (!scsi_eh_eflags_chk(scmd, SCSI_EH_CANCEL_CMD)) | |
809 | continue; | |
810 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:" | |
811 | "0x%p\n", current->comm, | |
812 | scmd)); | |
813 | rtn = scsi_try_to_abort_cmd(scmd); | |
814 | if (rtn == SUCCESS) { | |
815 | scsi_eh_eflags_clr(scmd, SCSI_EH_CANCEL_CMD); | |
816 | if (!scsi_device_online(scmd->device) || | |
817 | !scsi_eh_tur(scmd)) { | |
818 | scsi_eh_finish_cmd(scmd, done_q); | |
819 | } | |
820 | ||
821 | } else | |
822 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting" | |
823 | " cmd failed:" | |
824 | "0x%p\n", | |
825 | current->comm, | |
826 | scmd)); | |
827 | } | |
828 | ||
829 | return list_empty(work_q); | |
830 | } | |
831 | ||
832 | /** | |
833 | * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev | |
834 | * @scmd: SCSI cmd used to send BDR | |
835 | * | |
836 | * Notes: | |
837 | * There is no timeout for this operation. if this operation is | |
838 | * unreliable for a given host, then the host itself needs to put a | |
839 | * timer on it, and set the host back to a consistent state prior to | |
840 | * returning. | |
841 | **/ | |
842 | static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) | |
843 | { | |
94d0e7b8 | 844 | int rtn; |
1da177e4 LT |
845 | |
846 | if (!scmd->device->host->hostt->eh_device_reset_handler) | |
94d0e7b8 | 847 | return FAILED; |
1da177e4 | 848 | |
1da177e4 | 849 | rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd); |
1da177e4 LT |
850 | if (rtn == SUCCESS) { |
851 | scmd->device->was_reset = 1; | |
852 | scmd->device->expecting_cc_ua = 1; | |
853 | } | |
854 | ||
855 | return rtn; | |
856 | } | |
857 | ||
/**
 * scsi_eh_try_stu - Send START_UNIT to device.
 * @scmd:	Scsi cmd to send START_UNIT
 *
 * Return value:
 *	0 - Device is ready. 1 - Device NOT ready.
 **/
static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
{
	static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
	int rtn;
	int saved_result;

	/* only worth trying on devices that permit a restart */
	if (!scmd->device->allow_restart)
		return 1;

	memcpy(scmd->cmnd, stu_command, sizeof(stu_command));

	/*
	 * zero the sense buffer.  the scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));

	/* preserve the original result; the eh command will clobber it */
	saved_result = scmd->result;
	scmd->request_buffer = NULL;
	scmd->request_bufflen = 0;
	scmd->use_sg = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->underflow = 0;
	scmd->sc_data_direction = DMA_NONE;

	rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT);

	/*
	 * when we eventually call scsi_finish, we really wish to complete
	 * the original request, so let's restore the original data. (db)
	 */
	scsi_setup_cmd_retry(scmd);
	scmd->result = saved_result;

	/*
	 * hey, we are done.  let's look to see what happened.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
					  __FUNCTION__, scmd, rtn));
	if (rtn == SUCCESS)
		return 0;
	return 1;
}
908 | ||
/**
 * scsi_eh_stu - send START_UNIT if needed
 * @shost: scsi host being recovered.
 * @eh_done_q: list_head for processed commands.
 *
 * Notes:
 *    If commands are failing due to not ready, initializing command required,
 *	try revalidating the device, which will end up sending a start unit.
 **/
static int scsi_eh_stu(struct Scsi_Host *shost,
			      struct list_head *work_q,
			      struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd, *stu_scmd;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		stu_scmd = NULL;
		/*
		 * Find one failed command on this device whose sense data
		 * could not be resolved by scsi_check_sense(); it becomes
		 * the vehicle for the START UNIT attempt.
		 */
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
			    scsi_check_sense(scmd) == FAILED ) {
				stu_scmd = scmd;
				break;
			}

		if (!stu_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
						  " 0x%p\n", current->comm, sdev));

		if (!scsi_eh_try_stu(stu_scmd)) {
			/*
			 * START UNIT succeeded (returned 0).  If the device
			 * went away, or scsi_eh_tur() reports 0 (helper not
			 * visible here; presumably "device answers TEST UNIT
			 * READY" -- confirm), finish all of this device's
			 * failed commands so they get retried or completed.
			 */
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(stu_scmd)) {
				list_for_each_safe(lh, lh_sf, work_q) {
					scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd, done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				printk("%s: START_UNIT failed to sdev:"
				       " 0x%p\n", current->comm, sdev));
		}
	}

	/* Non-zero (recovered) when no failed commands remain queued. */
	return list_empty(work_q);
}
959 | ||
960 | ||
/**
 * scsi_eh_bus_device_reset - send bdr if needed
 * @shost: scsi host being recovered.
 * @eh_done_q: list_head for processed commands.
 *
 * Notes:
 *    Try a bus device reset.  still, look to see whether we have multiple
 *    devices that are jammed or not - if we have multiple devices, it
 *    makes no sense to try bus_device_reset - we really would need to try
 *    a bus_reset instead.
 **/
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
				    struct list_head *work_q,
				    struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd, *bdr_scmd;
	struct scsi_device *sdev;
	int rtn;

	shost_for_each_device(sdev, shost) {
		/* Any failed command of this device can carry the BDR. */
		bdr_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev) {
				bdr_scmd = scmd;
				break;
			}

		if (!bdr_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
						  " 0x%p\n", current->comm,
						  sdev));
		rtn = scsi_try_bus_device_reset(bdr_scmd);
		if (rtn == SUCCESS) {
			/*
			 * Reset worked; if the device has gone away or
			 * scsi_eh_tur() returns 0, finish every failed
			 * command that belongs to this device.
			 */
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(bdr_scmd)) {
				list_for_each_safe(lh, lh_sf,
						   work_q) {
					scmd = list_entry(lh, struct
							  scsi_cmnd,
							  eh_entry);
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd,
								   done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
							  " failed sdev:"
							  "0x%p\n",
							  current->comm,
							  sdev));
		}
	}

	/* Non-zero when every failed command has been dealt with. */
	return list_empty(work_q);
}
1020 | ||
1021 | /** | |
1022 | * scsi_try_bus_reset - ask host to perform a bus reset | |
1023 | * @scmd: SCSI cmd to send bus reset. | |
1024 | **/ | |
1025 | static int scsi_try_bus_reset(struct scsi_cmnd *scmd) | |
1026 | { | |
1027 | unsigned long flags; | |
1028 | int rtn; | |
1029 | ||
1030 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n", | |
1031 | __FUNCTION__)); | |
1da177e4 LT |
1032 | |
1033 | if (!scmd->device->host->hostt->eh_bus_reset_handler) | |
1034 | return FAILED; | |
1035 | ||
1da177e4 | 1036 | rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd); |
1da177e4 LT |
1037 | |
1038 | if (rtn == SUCCESS) { | |
1039 | if (!scmd->device->host->hostt->skip_settle_delay) | |
1040 | ssleep(BUS_RESET_SETTLE_TIME); | |
1041 | spin_lock_irqsave(scmd->device->host->host_lock, flags); | |
1042 | scsi_report_bus_reset(scmd->device->host, scmd->device->channel); | |
1043 | spin_unlock_irqrestore(scmd->device->host->host_lock, flags); | |
1044 | } | |
1045 | ||
1046 | return rtn; | |
1047 | } | |
1048 | ||
1049 | /** | |
1050 | * scsi_try_host_reset - ask host adapter to reset itself | |
1051 | * @scmd: SCSI cmd to send hsot reset. | |
1052 | **/ | |
1053 | static int scsi_try_host_reset(struct scsi_cmnd *scmd) | |
1054 | { | |
1055 | unsigned long flags; | |
1056 | int rtn; | |
1057 | ||
1058 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n", | |
1059 | __FUNCTION__)); | |
1da177e4 LT |
1060 | |
1061 | if (!scmd->device->host->hostt->eh_host_reset_handler) | |
1062 | return FAILED; | |
1063 | ||
1da177e4 | 1064 | rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd); |
1da177e4 LT |
1065 | |
1066 | if (rtn == SUCCESS) { | |
1067 | if (!scmd->device->host->hostt->skip_settle_delay) | |
1068 | ssleep(HOST_RESET_SETTLE_TIME); | |
1069 | spin_lock_irqsave(scmd->device->host->host_lock, flags); | |
1070 | scsi_report_bus_reset(scmd->device->host, scmd->device->channel); | |
1071 | spin_unlock_irqrestore(scmd->device->host->host_lock, flags); | |
1072 | } | |
1073 | ||
1074 | return rtn; | |
1075 | } | |
1076 | ||
/**
 * scsi_eh_bus_reset - send a bus reset
 * @shost: scsi host being recovered.
 * @eh_done_q: list_head for processed commands.
 **/
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
			     struct list_head *work_q,
			     struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd;
	struct scsi_cmnd *chan_scmd;
	unsigned int channel;
	int rtn;

	/*
	 * we really want to loop over the various channels, and do this on
	 * a channel by channel basis.  we should also check to see if any
	 * of the failed commands are on soft_reset devices, and if so, skip
	 * the reset.
	 */

	for (channel = 0; channel <= shost->max_channel; channel++) {
		/* Pick any failed command on this channel to carry the BRST. */
		chan_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (channel == scmd->device->channel) {
				chan_scmd = scmd;
				break;
				/*
				 * FIXME add back in some support for
				 * soft_reset devices.
				 */
			}
		}

		if (!chan_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
						  " %d\n", current->comm,
						  channel));
		rtn = scsi_try_bus_reset(chan_scmd);
		if (rtn == SUCCESS) {
			/*
			 * Reset worked: finish every failed command on this
			 * channel whose device is now gone or whose
			 * scsi_eh_tur() comes back 0.
			 */
			list_for_each_safe(lh, lh_sf, work_q) {
				scmd = list_entry(lh, struct scsi_cmnd,
						  eh_entry);
				if (channel == scmd->device->channel)
					if (!scsi_device_online(scmd->device) ||
					    !scsi_eh_tur(scmd))
						scsi_eh_finish_cmd(scmd,
								   done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
							  " failed chan: %d\n",
							  current->comm,
							  channel));
		}
	}
	/* Non-zero when no failed commands remain on the work queue. */
	return list_empty(work_q);
}
1137 | ||
/**
 * scsi_eh_host_reset - send a host reset
 * @work_q: list_head for processed commands.
 * @done_q: list_head for processed commands.
 **/
static int scsi_eh_host_reset(struct list_head *work_q,
			      struct list_head *done_q)
{
	int rtn;
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd;

	if (!list_empty(work_q)) {
		/* Any failed command will do for carrying the host reset. */
		scmd = list_entry(work_q->next,
				  struct scsi_cmnd, eh_entry);

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
						  , current->comm));

		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS) {
			/*
			 * NOTE(review): when scsi_eh_try_stu() fails (or
			 * succeeds but the first scsi_eh_tur() fails), the
			 * final "|| !scsi_eh_tur(scmd)" issues a second
			 * TEST UNIT READY to the same device.  It reads
			 * like an extra retry rather than a typo -- confirm
			 * this double call is intended.
			 */
			list_for_each_safe(lh, lh_sf, work_q) {
				scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
				if (!scsi_device_online(scmd->device) ||
				    (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
				    !scsi_eh_tur(scmd))
					scsi_eh_finish_cmd(scmd, done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
							  " failed\n",
							  current->comm));
		}
	}
	/* Non-zero when no failed commands remain on the work queue. */
	return list_empty(work_q);
}
1174 | ||
1175 | /** | |
1176 | * scsi_eh_offline_sdevs - offline scsi devices that fail to recover | |
1177 | * @work_q: list_head for processed commands. | |
1178 | * @done_q: list_head for processed commands. | |
1179 | * | |
1180 | **/ | |
1181 | static void scsi_eh_offline_sdevs(struct list_head *work_q, | |
1182 | struct list_head *done_q) | |
1183 | { | |
1184 | struct list_head *lh, *lh_sf; | |
1185 | struct scsi_cmnd *scmd; | |
1186 | ||
1187 | list_for_each_safe(lh, lh_sf, work_q) { | |
1188 | scmd = list_entry(lh, struct scsi_cmnd, eh_entry); | |
1189 | printk(KERN_INFO "scsi: Device offlined - not" | |
1190 | " ready after error recovery: host" | |
1191 | " %d channel %d id %d lun %d\n", | |
1192 | scmd->device->host->host_no, | |
1193 | scmd->device->channel, | |
1194 | scmd->device->id, | |
1195 | scmd->device->lun); | |
1196 | scsi_device_set_state(scmd->device, SDEV_OFFLINE); | |
1197 | if (scsi_eh_eflags_chk(scmd, SCSI_EH_CANCEL_CMD)) { | |
1198 | /* | |
1199 | * FIXME: Handle lost cmds. | |
1200 | */ | |
1201 | } | |
1202 | scsi_eh_finish_cmd(scmd, done_q); | |
1203 | } | |
1204 | return; | |
1205 | } | |
1206 | ||
/**
 * scsi_decide_disposition - Disposition a cmd on return from LLD.
 * @scmd: SCSI cmd to examine.
 *
 * Notes:
 *    This is *only* called when we are examining the status after sending
 *    out the actual data command.  any commands that are queued for error
 *    recovery (e.g. test_unit_ready) do *not* come through here.
 *
 *    When this routine returns failed, it means the error handler thread
 *    is woken.  In cases where the error code indicates an error that
 *    doesn't require the error handler read (i.e. we don't need to
 *    abort/reset), this function should return SUCCESS.
 **/
int scsi_decide_disposition(struct scsi_cmnd *scmd)
{
	int rtn;

	/*
	 * if the device is offline, then we clearly just pass the result back
	 * up to the top level.
	 */
	if (!scsi_device_online(scmd->device)) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
						  " as SUCCESS\n",
						  __FUNCTION__));
		return SUCCESS;
	}

	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	switch (host_byte(scmd->result)) {
	case DID_PASSTHROUGH:
		/*
		 * no matter what, pass this through to the upper layer.
		 * nuke this special code so that it looks like we are saying
		 * did_ok.
		 */
		scmd->result &= 0xff00ffff;
		return SUCCESS;
	case DID_OK:
		/*
		 * looks good.  drop through, and check the next byte.
		 */
		break;
	case DID_NO_CONNECT:
	case DID_BAD_TARGET:
	case DID_ABORT:
		/*
		 * note - this means that we just report the status back
		 * to the top level driver, not that we actually think
		 * that it indicates SUCCESS.
		 */
		return SUCCESS;
		/*
		 * when the low level driver returns did_soft_error,
		 * it is responsible for keeping an internal retry counter
		 * in order to avoid endless loops (db)
		 *
		 * actually this is a bug in this function here.  we should
		 * be mindful of the maximum number of retries specified
		 * and not get stuck in a loop.
		 */
	case DID_SOFT_ERROR:
		goto maybe_retry;
	case DID_IMM_RETRY:
		return NEEDS_RETRY;

	case DID_REQUEUE:
		return ADD_TO_MLQUEUE;

	case DID_ERROR:
		/*
		 * A reservation conflict masquerading as DID_ERROR breaks
		 * out of the switch so the status-byte handling below can
		 * run; everything else falls through and may be retried.
		 */
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			/*
			 * execute reservation conflict processing code
			 * lower down
			 */
			break;
		/* fallthrough */

	case DID_BUS_BUSY:
	case DID_PARITY:
		goto maybe_retry;
	case DID_TIME_OUT:
		/*
		 * when we scan the bus, we get timeout messages for
		 * these commands if there is no device available.
		 * other hosts report did_no_connect for the same thing.
		 */
		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
		     scmd->cmnd[0] == INQUIRY)) {
			return SUCCESS;
		} else {
			return FAILED;
		}
	case DID_RESET:
		return SUCCESS;
	default:
		return FAILED;
	}

	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * check the status byte to see if this indicates anything special.
	 */
	switch (status_byte(scmd->result)) {
	case QUEUE_FULL:
		/*
		 * the case of trying to send too many commands to a
		 * tagged queueing device.
		 */
	case BUSY:
		/*
		 * device can't talk to us at the moment.  Should only
		 * occur (SAM-3) when the task queue is empty, so will cause
		 * the empty queue handling to trigger a stall in the
		 * device.
		 */
		return ADD_TO_MLQUEUE;
	case GOOD:
	case COMMAND_TERMINATED:
	case TASK_ABORTED:
		return SUCCESS;
	case CHECK_CONDITION:
		rtn = scsi_check_sense(scmd);
		if (rtn == NEEDS_RETRY)
			goto maybe_retry;
		/* if rtn == FAILED, we have no sense information;
		 * returning FAILED will wake the error handler thread
		 * to collect the sense and redo the decide
		 * disposition */
		return rtn;
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
	case ACA_ACTIVE:
		/*
		 * who knows?  FIXME(eric)
		 */
		return SUCCESS;

	case RESERVATION_CONFLICT:
		printk(KERN_INFO "scsi: reservation conflict: host"
		       " %d channel %d id %d lun %d\n",
		       scmd->device->host->host_no, scmd->device->channel,
		       scmd->device->id, scmd->device->lun);
		return SUCCESS; /* causes immediate i/o error */
	default:
		return FAILED;
	}
	/* Not reached: every status-byte case above returns. */
	return FAILED;

maybe_retry:

	/* we requeue for retry because the error was retryable, and
	 * the request was not marked fast fail.  Note that above,
	 * even if the request is marked fast fail, we still requeue
	 * for queue congestion conditions (QUEUE_FULL or BUSY) */
	if ((++scmd->retries) < scmd->allowed
	    && !blk_noretry_request(scmd->request)) {
		return NEEDS_RETRY;
	} else {
		/*
		 * no more retries - report this one back to upper level.
		 */
		return SUCCESS;
	}
}
1383 | ||
1384 | /** | |
1385 | * scsi_eh_lock_done - done function for eh door lock request | |
1386 | * @scmd: SCSI command block for the door lock request | |
1387 | * | |
1388 | * Notes: | |
1389 | * We completed the asynchronous door lock request, and it has either | |
1390 | * locked the door or failed. We must free the command structures | |
1391 | * associated with this request. | |
1392 | **/ | |
1393 | static void scsi_eh_lock_done(struct scsi_cmnd *scmd) | |
1394 | { | |
1395 | struct scsi_request *sreq = scmd->sc_request; | |
1396 | ||
1397 | scsi_release_request(sreq); | |
1398 | } | |
1399 | ||
1400 | ||
1401 | /** | |
1402 | * scsi_eh_lock_door - Prevent medium removal for the specified device | |
1403 | * @sdev: SCSI device to prevent medium removal | |
1404 | * | |
1405 | * Locking: | |
1406 | * We must be called from process context; scsi_allocate_request() | |
1407 | * may sleep. | |
1408 | * | |
1409 | * Notes: | |
1410 | * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the | |
1411 | * head of the devices request queue, and continue. | |
1412 | * | |
1413 | * Bugs: | |
1414 | * scsi_allocate_request() may sleep waiting for existing requests to | |
1415 | * be processed. However, since we haven't kicked off any request | |
1416 | * processing for this host, this may deadlock. | |
1417 | * | |
1418 | * If scsi_allocate_request() fails for what ever reason, we | |
1419 | * completely forget to lock the door. | |
1420 | **/ | |
1421 | static void scsi_eh_lock_door(struct scsi_device *sdev) | |
1422 | { | |
1423 | struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL); | |
1424 | ||
1425 | if (unlikely(!sreq)) { | |
1426 | printk(KERN_ERR "%s: request allocate failed," | |
1427 | "prevent media removal cmd not sent\n", __FUNCTION__); | |
1428 | return; | |
1429 | } | |
1430 | ||
1431 | sreq->sr_cmnd[0] = ALLOW_MEDIUM_REMOVAL; | |
1432 | sreq->sr_cmnd[1] = 0; | |
1433 | sreq->sr_cmnd[2] = 0; | |
1434 | sreq->sr_cmnd[3] = 0; | |
1435 | sreq->sr_cmnd[4] = SCSI_REMOVAL_PREVENT; | |
1436 | sreq->sr_cmnd[5] = 0; | |
1437 | sreq->sr_data_direction = DMA_NONE; | |
1438 | sreq->sr_bufflen = 0; | |
1439 | sreq->sr_buffer = NULL; | |
1440 | sreq->sr_allowed = 5; | |
1441 | sreq->sr_done = scsi_eh_lock_done; | |
1442 | sreq->sr_timeout_per_command = 10 * HZ; | |
1443 | sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]); | |
1444 | ||
1445 | scsi_insert_special_req(sreq, 1); | |
1446 | } | |
1447 | ||
1448 | ||
/**
 * scsi_restart_operations - restart io operations to the specified host.
 * @shost: Host we are restarting.
 *
 * Notes:
 *    When we entered the error handler, we blocked all further i/o to
 *    this device.  we need to 'reverse' this process.
 **/
static void scsi_restart_operations(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	/*
	 * If the door was locked, we need to insert a door lock request
	 * onto the head of the SCSI request queue for the device.  There
	 * is no point trying to lock the door of an off-line device.
	 */
	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev) && sdev->locked)
			scsi_eh_lock_door(sdev);
	}

	/*
	 * next free up anything directly waiting upon the host.  this
	 * will be requests for character device operations, and also for
	 * ioctls to queued block devices.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
					  __FUNCTION__));

	/* Clear the recovery flag before waking anyone waiting on it. */
	clear_bit(SHOST_RECOVERY, &shost->shost_state);

	wake_up(&shost->host_wait);

	/*
	 * finally we need to re-initiate requests that may be pending.  we will
	 * have had everything blocked while error handling is taking place, and
	 * now that error recovery is done, we will need to ensure that these
	 * requests are started.
	 */
	scsi_run_host_queues(shost);
}
1491 | ||
/**
 * scsi_eh_ready_devs - check device ready state and recover if not.
 * @shost: host to be recovered.
 * @eh_done_q: list_head for processed commands.
 *
 * Notes:
 *	Escalation ladder: each step returns non-zero once the work queue
 *	is empty, in which case no further (more drastic) step is taken.
 **/
static void scsi_eh_ready_devs(struct Scsi_Host *shost,
			       struct list_head *work_q,
			       struct list_head *done_q)
{
	if (scsi_eh_stu(shost, work_q, done_q))
		return;
	if (scsi_eh_bus_device_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_bus_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_host_reset(work_q, done_q))
		return;
	scsi_eh_offline_sdevs(work_q, done_q);
}
1508 | ||
/**
 * scsi_eh_flush_done_q - finish processed commands or retry them.
 * @done_q: list_head of processed commands.
 **/
static void scsi_eh_flush_done_q(struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd;

	list_for_each_safe(lh, lh_sf, done_q) {
		scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
		list_del_init(lh);
		/*
		 * A command on a still-online device with retries left
		 * (and not flagged no-retry) goes back on the queue for
		 * another attempt; everything else is finished here.
		 */
		if (scsi_device_online(scmd->device) &&
		    !blk_noretry_request(scmd->request) &&
		    (++scmd->retries < scmd->allowed)) {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
							  " retry cmd: %p\n",
							  current->comm,
							  scmd));
			scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
		} else {
			/*
			 * If just we got sense for the device (called
			 * scsi_eh_get_sense), scmd->result is already
			 * set, do not set DRIVER_TIMEOUT.
			 */
			if (!scmd->result)
				scmd->result |= (DRIVER_TIMEOUT << 24);
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
							  " cmd: %p\n",
							  current->comm, scmd));
			scsi_finish_command(scmd);
		}
	}
}
1545 | ||
1546 | /** | |
1547 | * scsi_unjam_host - Attempt to fix a host which has a cmd that failed. | |
1548 | * @shost: Host to unjam. | |
1549 | * | |
1550 | * Notes: | |
1551 | * When we come in here, we *know* that all commands on the bus have | |
1552 | * either completed, failed or timed out. we also know that no further | |
1553 | * commands are being sent to the host, so things are relatively quiet | |
1554 | * and we have freedom to fiddle with things as we wish. | |
1555 | * | |
1556 | * This is only the *default* implementation. it is possible for | |
1557 | * individual drivers to supply their own version of this function, and | |
1558 | * if the maintainer wishes to do this, it is strongly suggested that | |
1559 | * this function be taken as a template and modified. this function | |
1560 | * was designed to correctly handle problems for about 95% of the | |
1561 | * different cases out there, and it should always provide at least a | |
1562 | * reasonable amount of error recovery. | |
1563 | * | |
1564 | * Any command marked 'failed' or 'timeout' must eventually have | |
1565 | * scsi_finish_cmd() called for it. we do all of the retry stuff | |
1566 | * here, so when we restart the host after we return it should have an | |
1567 | * empty queue. | |
1568 | **/ | |
1569 | static void scsi_unjam_host(struct Scsi_Host *shost) | |
1570 | { | |
1571 | unsigned long flags; | |
1572 | LIST_HEAD(eh_work_q); | |
1573 | LIST_HEAD(eh_done_q); | |
1574 | ||
1575 | spin_lock_irqsave(shost->host_lock, flags); | |
1576 | list_splice_init(&shost->eh_cmd_q, &eh_work_q); | |
1577 | spin_unlock_irqrestore(shost->host_lock, flags); | |
1578 | ||
1579 | SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q)); | |
1580 | ||
1581 | if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q)) | |
1582 | if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q)) | |
1583 | scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q); | |
1584 | ||
1585 | scsi_eh_flush_done_q(&eh_done_q); | |
1586 | } | |
1587 | ||
/**
 * scsi_error_handler - Handle errors/timeouts of SCSI cmds.
 * @data: Host for which we are running.
 *
 * Notes:
 *    This is always run in the context of a kernel thread.  The idea is
 *    that we start this thing up when the kernel starts up (one per host
 *    that we detect), and it immediately goes to sleep and waits for some
 *    event (i.e. failure).  When this takes place, we have the job of
 *    trying to unjam the bus and restarting things.
 **/
int scsi_error_handler(void *data)
{
	struct Scsi_Host *shost = (struct Scsi_Host *) data;
	int rtn;
	/* On-stack semaphore that scsi_eh_wakeup() ups to kick us. */
	DECLARE_MUTEX_LOCKED(sem);

	/*
	 * Flush resources
	 */

	daemonize("scsi_eh_%d", shost->host_no);

	/* Error recovery must keep running even during a system freeze. */
	current->flags |= PF_NOFREEZE;

	/* Publish ourselves so the midlayer can find and wake us. */
	shost->eh_wait = &sem;
	shost->ehandler = current;

	/*
	 * Wake up the thread that created us.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of"
					  " scsi_eh_%d\n",shost->host_no));

	complete(shost->eh_notify);

	while (1) {
		/*
		 * If we get a signal, it means we are supposed to go
		 * away and die.  This typically happens if the user is
		 * trying to unload a module.
		 */
		SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
						  " scsi_eh_%d"
						  " sleeping\n",shost->host_no));

		/*
		 * Note - we always use down_interruptible with the semaphore
		 * even if the module was loaded as part of the kernel.  The
		 * reason is that down() will cause this thread to be counted
		 * in the load average as a running process, and down
		 * interruptible doesn't.  Given that we need to allow this
		 * thread to die if the driver was loaded as a module, using
		 * semaphores isn't unreasonable.
		 */
		down_interruptible(&sem);
		if (shost->eh_kill)
			break;

		SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
						  " scsi_eh_%d waking"
						  " up\n",shost->host_no));

		shost->eh_active = 1;

		/*
		 * We have a host that is failing for some reason.  Figure out
		 * what we need to do to get it up and online again (if we can).
		 * If we fail, we end up taking the thing offline.
		 */
		if (shost->hostt->eh_strategy_handler)
			rtn = shost->hostt->eh_strategy_handler(shost);
		else
			scsi_unjam_host(shost);

		shost->eh_active = 0;

		/*
		 * Note - if the above fails completely, the action is to take
		 * individual devices offline and flush the queue of any
		 * outstanding requests that may have been pending.  When we
		 * restart, we restart any I/O to any other devices on the bus
		 * which are still online.
		 */
		scsi_restart_operations(shost);

	}

	SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d"
					  " exiting\n",shost->host_no));

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	shost->eh_wait = NULL;

	/*
	 * Knock this down too.  From this point on, the host is flying
	 * without a pilot.  If this is because the module is being unloaded,
	 * that's fine.  If the user sent a signal to this thing, we are
	 * potentially in real danger.
	 */
	shost->eh_active = 0;
	shost->ehandler = NULL;

	/*
	 * If anyone is waiting for us to exit (i.e. someone trying to unload
	 * a driver), then wake up that process to let them know we are on
	 * the way out the door.
	 */
	complete_and_exit(shost->eh_notify, 0);
	return 0;
}
1701 | ||
1702 | /* | |
1703 | * Function: scsi_report_bus_reset() | |
1704 | * | |
1705 | * Purpose: Utility function used by low-level drivers to report that | |
1706 | * they have observed a bus reset on the bus being handled. | |
1707 | * | |
1708 | * Arguments: shost - Host in question | |
1709 | * channel - channel on which reset was observed. | |
1710 | * | |
1711 | * Returns: Nothing | |
1712 | * | |
1713 | * Lock status: Host lock must be held. | |
1714 | * | |
1715 | * Notes: This only needs to be called if the reset is one which | |
1716 | * originates from an unknown location. Resets originated | |
1717 | * by the mid-level itself don't need to call this, but there | |
1718 | * should be no harm. | |
1719 | * | |
1720 | * The main purpose of this is to make sure that a CHECK_CONDITION | |
1721 | * is properly treated. | |
1722 | */ | |
1723 | void scsi_report_bus_reset(struct Scsi_Host *shost, int channel) | |
1724 | { | |
1725 | struct scsi_device *sdev; | |
1726 | ||
1727 | __shost_for_each_device(sdev, shost) { | |
1728 | if (channel == sdev->channel) { | |
1729 | sdev->was_reset = 1; | |
1730 | sdev->expecting_cc_ua = 1; | |
1731 | } | |
1732 | } | |
1733 | } | |
1734 | EXPORT_SYMBOL(scsi_report_bus_reset); | |
1735 | ||
1736 | /* | |
1737 | * Function: scsi_report_device_reset() | |
1738 | * | |
1739 | * Purpose: Utility function used by low-level drivers to report that | |
1740 | * they have observed a device reset on the device being handled. | |
1741 | * | |
1742 | * Arguments: shost - Host in question | |
1743 | * channel - channel on which reset was observed | |
1744 | * target - target on which reset was observed | |
1745 | * | |
1746 | * Returns: Nothing | |
1747 | * | |
1748 | * Lock status: Host lock must be held | |
1749 | * | |
1750 | * Notes: This only needs to be called if the reset is one which | |
1751 | * originates from an unknown location. Resets originated | |
1752 | * by the mid-level itself don't need to call this, but there | |
1753 | * should be no harm. | |
1754 | * | |
1755 | * The main purpose of this is to make sure that a CHECK_CONDITION | |
1756 | * is properly treated. | |
1757 | */ | |
1758 | void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target) | |
1759 | { | |
1760 | struct scsi_device *sdev; | |
1761 | ||
1762 | __shost_for_each_device(sdev, shost) { | |
1763 | if (channel == sdev->channel && | |
1764 | target == sdev->id) { | |
1765 | sdev->was_reset = 1; | |
1766 | sdev->expecting_cc_ua = 1; | |
1767 | } | |
1768 | } | |
1769 | } | |
1770 | EXPORT_SYMBOL(scsi_report_device_reset); | |
1771 | ||
/*
 * Intentionally empty completion callback: scsi_reset_provider() sends
 * resets, not real commands, so there is nothing to do on completion.
 */
static void
scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
{
}
1776 | ||
1777 | /* | |
1778 | * Function: scsi_reset_provider | |
1779 | * | |
1780 | * Purpose: Send requested reset to a bus or device at any phase. | |
1781 | * | |
1782 | * Arguments: device - device to send reset to | |
1783 | * flag - reset type (see scsi.h) | |
1784 | * | |
1785 | * Returns: SUCCESS/FAILURE. | |
1786 | * | |
1787 | * Notes: This is used by the SCSI Generic driver to provide | |
1788 | * Bus/Device reset capability. | |
1789 | */ | |
1790 | int | |
1791 | scsi_reset_provider(struct scsi_device *dev, int flag) | |
1792 | { | |
1793 | struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL); | |
1794 | struct request req; | |
1795 | int rtn; | |
1796 | ||
1797 | scmd->request = &req; | |
1798 | memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout)); | |
1799 | scmd->request->rq_status = RQ_SCSI_BUSY; | |
b4edcbca | 1800 | |
1da177e4 LT |
1801 | memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd)); |
1802 | ||
1803 | scmd->scsi_done = scsi_reset_provider_done_command; | |
1804 | scmd->done = NULL; | |
1805 | scmd->buffer = NULL; | |
1806 | scmd->bufflen = 0; | |
1807 | scmd->request_buffer = NULL; | |
1808 | scmd->request_bufflen = 0; | |
1da177e4 LT |
1809 | |
1810 | scmd->cmd_len = 0; | |
1811 | ||
1812 | scmd->sc_data_direction = DMA_BIDIRECTIONAL; | |
1813 | scmd->sc_request = NULL; | |
1814 | scmd->sc_magic = SCSI_CMND_MAGIC; | |
1815 | ||
1816 | init_timer(&scmd->eh_timeout); | |
1817 | ||
1818 | /* | |
1819 | * Sometimes the command can get back into the timer chain, | |
1820 | * so use the pid as an identifier. | |
1821 | */ | |
1822 | scmd->pid = 0; | |
1823 | ||
1824 | switch (flag) { | |
1825 | case SCSI_TRY_RESET_DEVICE: | |
1826 | rtn = scsi_try_bus_device_reset(scmd); | |
1827 | if (rtn == SUCCESS) | |
1828 | break; | |
1829 | /* FALLTHROUGH */ | |
1830 | case SCSI_TRY_RESET_BUS: | |
1831 | rtn = scsi_try_bus_reset(scmd); | |
1832 | if (rtn == SUCCESS) | |
1833 | break; | |
1834 | /* FALLTHROUGH */ | |
1835 | case SCSI_TRY_RESET_HOST: | |
1836 | rtn = scsi_try_host_reset(scmd); | |
1837 | break; | |
1838 | default: | |
1839 | rtn = FAILED; | |
1840 | } | |
1841 | ||
1da177e4 LT |
1842 | scsi_next_command(scmd); |
1843 | return rtn; | |
1844 | } | |
1845 | EXPORT_SYMBOL(scsi_reset_provider); | |
1846 | ||
1847 | /** | |
1848 | * scsi_normalize_sense - normalize main elements from either fixed or | |
1849 | * descriptor sense data format into a common format. | |
1850 | * | |
1851 | * @sense_buffer: byte array containing sense data returned by device | |
1852 | * @sb_len: number of valid bytes in sense_buffer | |
1853 | * @sshdr: pointer to instance of structure that common | |
1854 | * elements are written to. | |
1855 | * | |
1856 | * Notes: | |
1857 | * The "main elements" from sense data are: response_code, sense_key, | |
1858 | * asc, ascq and additional_length (only for descriptor format). | |
1859 | * | |
1860 | * Typically this function can be called after a device has | |
1861 | * responded to a SCSI command with the CHECK_CONDITION status. | |
1862 | * | |
1863 | * Return value: | |
1864 | * 1 if valid sense data information found, else 0; | |
1865 | **/ | |
1866 | int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, | |
1867 | struct scsi_sense_hdr *sshdr) | |
1868 | { | |
1869 | if (!sense_buffer || !sb_len || (sense_buffer[0] & 0x70) != 0x70) | |
1870 | return 0; | |
1871 | ||
1872 | memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); | |
1873 | ||
1874 | sshdr->response_code = (sense_buffer[0] & 0x7f); | |
1875 | if (sshdr->response_code >= 0x72) { | |
1876 | /* | |
1877 | * descriptor format | |
1878 | */ | |
1879 | if (sb_len > 1) | |
1880 | sshdr->sense_key = (sense_buffer[1] & 0xf); | |
1881 | if (sb_len > 2) | |
1882 | sshdr->asc = sense_buffer[2]; | |
1883 | if (sb_len > 3) | |
1884 | sshdr->ascq = sense_buffer[3]; | |
1885 | if (sb_len > 7) | |
1886 | sshdr->additional_length = sense_buffer[7]; | |
1887 | } else { | |
1888 | /* | |
1889 | * fixed format | |
1890 | */ | |
1891 | if (sb_len > 2) | |
1892 | sshdr->sense_key = (sense_buffer[2] & 0xf); | |
1893 | if (sb_len > 7) { | |
1894 | sb_len = (sb_len < (sense_buffer[7] + 8)) ? | |
1895 | sb_len : (sense_buffer[7] + 8); | |
1896 | if (sb_len > 12) | |
1897 | sshdr->asc = sense_buffer[12]; | |
1898 | if (sb_len > 13) | |
1899 | sshdr->ascq = sense_buffer[13]; | |
1900 | } | |
1901 | } | |
1902 | ||
1903 | return 1; | |
1904 | } | |
1905 | EXPORT_SYMBOL(scsi_normalize_sense); | |
1906 | ||
/*
 * scsi_request_normalize_sense - normalize the sense buffer attached to a
 * scsi_request.  Thin convenience wrapper around scsi_normalize_sense();
 * returns 1 if valid sense data was found and written to @sshdr, else 0.
 */
int scsi_request_normalize_sense(struct scsi_request *sreq,
				 struct scsi_sense_hdr *sshdr)
{
	return scsi_normalize_sense(sreq->sr_sense_buffer,
			sizeof(sreq->sr_sense_buffer), sshdr);
}
EXPORT_SYMBOL(scsi_request_normalize_sense);
1914 | ||
/*
 * scsi_command_normalize_sense - normalize the sense buffer attached to a
 * scsi_cmnd.  Thin convenience wrapper around scsi_normalize_sense();
 * returns 1 if valid sense data was found and written to @sshdr, else 0.
 */
int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
				 struct scsi_sense_hdr *sshdr)
{
	return scsi_normalize_sense(cmd->sense_buffer,
			sizeof(cmd->sense_buffer), sshdr);
}
EXPORT_SYMBOL(scsi_command_normalize_sense);
1922 | ||
1923 | /** | |
1924 | * scsi_sense_desc_find - search for a given descriptor type in | |
1925 | * descriptor sense data format. | |
1926 | * | |
1927 | * @sense_buffer: byte array of descriptor format sense data | |
1928 | * @sb_len: number of valid bytes in sense_buffer | |
1929 | * @desc_type: value of descriptor type to find | |
1930 | * (e.g. 0 -> information) | |
1931 | * | |
1932 | * Notes: | |
1933 | * only valid when sense data is in descriptor format | |
1934 | * | |
1935 | * Return value: | |
1936 | * pointer to start of (first) descriptor if found else NULL | |
1937 | **/ | |
const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
				int desc_type)
{
	int add_sen_len, add_len, desc_len, k;
	const u8 * descp;

	/* need the 8-byte header plus a non-zero additional length (byte 7) */
	if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
		return NULL;
	/* descriptors exist only in descriptor format (codes 0x72/0x73) */
	if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
		return NULL;
	/* clamp to the descriptor bytes actually present in the buffer */
	add_sen_len = (add_sen_len < (sb_len - 8)) ?
			add_sen_len : (sb_len - 8);
	descp = &sense_buffer[8];
	for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
		descp += desc_len;
		/* descp[1] is this descriptor's additional length; -1 marks
		 * a truncated descriptor whose length byte is out of range */
		add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
		desc_len = add_len + 2;
		/* type byte is still readable even for a short descriptor,
		 * so check it before bailing out below */
		if (descp[0] == desc_type)
			return descp;
		if (add_len < 0) // short descriptor ??
			break;
	}
	return NULL;
}
EXPORT_SYMBOL(scsi_sense_desc_find);
1963 | ||
1964 | /** | |
1965 | * scsi_get_sense_info_fld - attempts to get information field from | |
1966 | * sense data (either fixed or descriptor format) | |
1967 | * | |
1968 | * @sense_buffer: byte array of sense data | |
1969 | * @sb_len: number of valid bytes in sense_buffer | |
1970 | * @info_out: pointer to 64 integer where 8 or 4 byte information | |
1971 | * field will be placed if found. | |
1972 | * | |
1973 | * Return value: | |
1974 | * 1 if information field found, 0 if not found. | |
1975 | **/ | |
1976 | int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, | |
1977 | u64 * info_out) | |
1978 | { | |
1979 | int j; | |
1980 | const u8 * ucp; | |
1981 | u64 ull; | |
1982 | ||
1983 | if (sb_len < 7) | |
1984 | return 0; | |
1985 | switch (sense_buffer[0] & 0x7f) { | |
1986 | case 0x70: | |
1987 | case 0x71: | |
1988 | if (sense_buffer[0] & 0x80) { | |
1989 | *info_out = (sense_buffer[3] << 24) + | |
1990 | (sense_buffer[4] << 16) + | |
1991 | (sense_buffer[5] << 8) + sense_buffer[6]; | |
1992 | return 1; | |
1993 | } else | |
1994 | return 0; | |
1995 | case 0x72: | |
1996 | case 0x73: | |
1997 | ucp = scsi_sense_desc_find(sense_buffer, sb_len, | |
1998 | 0 /* info desc */); | |
1999 | if (ucp && (0xa == ucp[1])) { | |
2000 | ull = 0; | |
2001 | for (j = 0; j < 8; ++j) { | |
2002 | if (j > 0) | |
2003 | ull <<= 8; | |
2004 | ull |= ucp[4 + j]; | |
2005 | } | |
2006 | *info_out = ull; | |
2007 | return 1; | |
2008 | } else | |
2009 | return 0; | |
2010 | default: | |
2011 | return 0; | |
2012 | } | |
2013 | } | |
2014 | EXPORT_SYMBOL(scsi_get_sense_info_fld); |