/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *	Initial versions: Eric Youngdale (eric@andante.org).
 *	Based upon conversations with large numbers
 *	of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR	(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE	32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP


/*
 * Function:	scsi_insert_special_req()
 *
 * Purpose:	Insert pre-formed request into request queue.
 *
 * Arguments:	sreq	- request that is ready to be queued.
 *		at_head	- boolean.  True if we should insert at head
 *			  of queue, false if we should insert at tail.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	This function is called from character device and from
 *		ioctl types of functions where the caller knows exactly
 *		what SCSI command needs to be issued.  The idea is that
 *		we merely inject the command into the queue (at the head
 *		for now), and then call the queue request function to actually
 *		process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
	/*
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
			   at_head, sreq);
	return 0;
}

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->flags &= ~REQ_DONTPREP;
	req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;

	scsi_put_command(cmd);
}

/*
 * Function:	scsi_queue_insert()
 *
 * Purpose:	Insert a command in the midlevel queue.
 *
 * Arguments:	cmd    - command that we are adding to queue.
 *		reason - why we are inserting command to queue.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	We do this for one of two cases.  Either the host is busy
 *		and it cannot accept any more commands for the time being,
 *		or the device returned QUEUE_FULL and can accept no more
 *		commands.
 * Notes:	This could be called either from an interrupt context or a
 *		normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
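
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * dispatch path can feed scsi_queue_insert() when the low-level driver's
 * queuecommand() refuses a command.  "example_requeue_if_busy" is a
 * hypothetical helper, shown only to document the calling convention.
 */
static inline void example_requeue_if_busy(struct scsi_cmnd *cmd, int rtn)
{
	/*
	 * A non-zero queuecommand() return means the host or the device
	 * cannot take the command right now; hand it back to the midlevel
	 * queue with the reason, so the right *_blocked stall is applied.
	 */
	if (rtn == SCSI_MLQUEUE_HOST_BUSY || rtn == SCSI_MLQUEUE_DEVICE_BUSY)
		scsi_queue_insert(cmd, rtn);
}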

/*
 * Function:	scsi_do_req
 *
 * Purpose:	Queue a SCSI request
 *
 * Arguments:	sreq	  - command descriptor.
 *		cmnd	  - actual SCSI command to be performed.
 *		buffer	  - data buffer.
 *		bufflen	  - size of data buffer.
 *		done	  - completion function to be run.
 *		timeout	  - how long to let it run before timeout.
 *		retries	  - number of retries we allow.
 *
 * Lock status:	No locks held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	This function is only used for queueing requests for things
 *		like ioctls and character device requests - this is because
 *		we essentially just inject a request into the queue for the
 *		device.
 *
 *		In order to support the scsi_device_quiesce function, we
 *		now inject requests on the *head* of the device queue
 *		rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);

	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called on completion,
	 * either by us or via the scsi_hosts[host].queuecommand() path;
	 * it in turn must call the completion function for the high
	 * level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
	BUG_ON(!req->waiting);

	complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
{
	DECLARE_COMPLETION(wait);
	int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
	struct request *req;

	req = blk_get_request(sreq->sr_device->request_queue, write,
			      __GFP_WAIT);
	if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
				       buffer, bufflen, __GFP_WAIT)) {
		sreq->sr_result = DRIVER_ERROR << 24;
		blk_put_request(req);
		return;
	}

	req->flags |= REQ_NOMERGE;
	req->waiting = &wait;
	req->end_io = scsi_wait_req_end_io;
	req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
	req->sense = sreq->sr_sense_buffer;
	req->sense_len = 0;
	memcpy(req->cmd, cmnd, req->cmd_len);
	req->timeout = timeout;
	req->flags |= REQ_BLOCK_PC;
	req->rq_disk = NULL;
	blk_insert_request(sreq->sr_device->request_queue, req,
			   sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
	wait_for_completion(&wait);
	sreq->sr_request->waiting = NULL;
	sreq->sr_result = req->errors;
	if (req->errors)
		sreq->sr_result |= (DRIVER_ERROR << 24);

	blk_put_request(req);
}
EXPORT_SYMBOL(scsi_wait_req);

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
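
/*
 * Illustrative sketch (not part of the original file): issuing a 36-byte
 * INQUIRY through scsi_execute().  "example_inquiry" is a hypothetical
 * helper; the timeout and retry counts are arbitrary assumptions.
 */
static inline int example_inquiry(struct scsi_device *sdev, void *buf)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };

	/* data flows in from the device; no private sense buffer needed */
	return scsi_execute(sdev, cmd, DMA_FROM_DEVICE, buf, 36,
			    NULL, 10 * HZ, 3, 0);
}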

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
		memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
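
/*
 * Illustrative sketch (not part of the original file): a caller issuing
 * TEST UNIT READY through scsi_execute_req() and decoding the sense data.
 * "example_test_unit_ready" is a hypothetical helper; timeout and retry
 * values are arbitrary assumptions.
 */
static inline int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	/* no data transfer, so no buffer; 10s timeout, 3 retries */
	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
				  &sshdr, 10 * HZ, 3);
	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == UNIT_ATTENTION)
		return -EAGAIN;	/* e.g. media change; caller may retry */
	return result ? -EIO : 0;
}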

/*
 * Function:	scsi_init_cmd_errh()
 *
 * Purpose:	Initialize cmd fields related to error handling.
 *
 * Arguments:	cmd	- command that is ready to be queued.
 *
 * Returns:	Nothing
 *
 * Notes:	This function has the job of initializing a number of
 *		fields related to error handling.  Typically this will
 *		be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;

	return 1;
}

/*
 * Function:	scsi_setup_cmd_retry()
 *
 * Purpose:	Restore the command state for a retry
 *
 * Arguments:	cmd	- command to be restored
 *
 * Returns:	Nothing
 *
 * Notes:	Immediately prior to retrying a command, we need
 *		to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev.  We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first.  Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
				 same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue.  scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list.  This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->device->request_queue;

	scsi_put_command(cmd);
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:	scsi_end_request()
 *
 * Purpose:	Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:	cmd	 - command that is complete.
 *		uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *		bytes	 - number of bytes of completed I/O
 *		requeue	 - indicates whether we should requeue leftovers.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	cmd if requeue required, NULL otherwise.
 *
 * Notes:	This is called for block device requests in order to
 *		mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}
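
/*
 * Illustrative sketch (not part of the original file): the pool picked by
 * scsi_alloc_sgtable() is the smallest power-of-two bucket that fits
 * cmd->use_sg, e.g. a 40-segment command draws a 64-entry table from
 * "sgpool-64" (sglist_len == 3).  The hypothetical helper below computes
 * the same bucket index arithmetically.
 */
static inline int example_sgpool_index(int nsegs)
{
	int index = 0, size = 8;

	/* 1..8 -> 0, 9..16 -> 1, 17..32 -> 2, 33..64 -> 3, ... */
	while (size < nsegs) {
		size <<= 1;
		index++;
	}
	return index;
}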
730 | ||
731 | /* | |
732 | * Function: scsi_release_buffers() | |
733 | * | |
734 | * Purpose: Completion processing for block device I/O requests. | |
735 | * | |
736 | * Arguments: cmd - command that we are bailing. | |
737 | * | |
738 | * Lock status: Assumed that no lock is held upon entry. | |
739 | * | |
740 | * Returns: Nothing | |
741 | * | |
742 | * Notes: In the event that an upper level driver rejects a | |
743 | * command, we must release resources allocated during | |
744 | * the __init_io() function. Primarily this would involve | |
745 | * the scatter-gather table, and potentially any bounce | |
746 | * buffers. | |
747 | */ | |
748 | static void scsi_release_buffers(struct scsi_cmnd *cmd) | |
749 | { | |
750 | struct request *req = cmd->request; | |
751 | ||
752 | /* | |
753 | * Free up any indirection buffers we allocated for DMA purposes. | |
754 | */ | |
755 | if (cmd->use_sg) | |
756 | scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); | |
757 | else if (cmd->request_buffer != req->buffer) | |
758 | kfree(cmd->request_buffer); | |
759 | ||
760 | /* | |
761 | * Zero these out. They now point to freed memory, and it is | |
762 | * dangerous to hang onto the pointers. | |
763 | */ | |
764 | cmd->buffer = NULL; | |
765 | cmd->bufflen = 0; | |
766 | cmd->request_buffer = NULL; | |
767 | cmd->request_bufflen = 0; | |
768 | } | |
769 | ||
770 | /* | |
771 | * Function: scsi_io_completion() | |
772 | * | |
773 | * Purpose: Completion processing for block device I/O requests. | |
774 | * | |
775 | * Arguments: cmd - command that is finished. | |
776 | * | |
777 | * Lock status: Assumed that no lock is held upon entry. | |
778 | * | |
779 | * Returns: Nothing | |
780 | * | |
781 | * Notes: This function is matched in terms of capabilities to | |
782 | * the function that created the scatter-gather list. | |
783 | * In other words, if there are no bounce buffers | |
784 | * (the normal case for most drivers), we don't need | |
785 | * the logic to deal with cleaning up afterwards. | |
786 | * | |
787 | * We must do one of several things here: | |
788 | * | |
789 | * a) Call scsi_end_request. This will finish off the | |
790 | * specified number of sectors. If we are done, the | |
791 | * command block will be released, and the queue | |
792 | * function will be goosed. If we are not done, then | |
793 | * scsi_end_request will directly goose the queue. | |
794 | * | |
795 | * b) We can just use scsi_requeue_command() here. This would | |
796 | * be used if we just wanted to retry, for example. | |
797 | */ | |
798 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, | |
799 | unsigned int block_bytes) | |
800 | { | |
801 | int result = cmd->result; | |
802 | int this_count = cmd->bufflen; | |
803 | request_queue_t *q = cmd->device->request_queue; | |
804 | struct request *req = cmd->request; | |
805 | int clear_errors = 1; | |
806 | struct scsi_sense_hdr sshdr; | |
807 | int sense_valid = 0; | |
808 | int sense_deferred = 0; | |
809 | ||
810 | if (blk_complete_barrier_rq(q, req, good_bytes >> 9)) | |
811 | return; | |
812 | ||
813 | /* | |
814 | * Free up any indirection buffers we allocated for DMA purposes. | |
815 | * For the case of a READ, we need to copy the data out of the | |
816 | * bounce buffer and into the real buffer. | |
817 | */ | |
818 | if (cmd->use_sg) | |
819 | scsi_free_sgtable(cmd->buffer, cmd->sglist_len); | |
820 | else if (cmd->buffer != req->buffer) { | |
821 | if (rq_data_dir(req) == READ) { | |
822 | unsigned long flags; | |
823 | char *to = bio_kmap_irq(req->bio, &flags); | |
824 | memcpy(to, cmd->buffer, cmd->bufflen); | |
825 | bio_kunmap_irq(to, &flags); | |
826 | } | |
827 | kfree(cmd->buffer); | |
828 | } | |
829 | ||
830 | if (result) { | |
831 | sense_valid = scsi_command_normalize_sense(cmd, &sshdr); | |
832 | if (sense_valid) | |
833 | sense_deferred = scsi_sense_is_deferred(&sshdr); | |
834 | } | |
835 | if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ | |
836 | req->errors = result; | |
837 | if (result) { | |
838 | clear_errors = 0; | |
839 | if (sense_valid && req->sense) { | |
840 | /* | |
841 | * SG_IO wants current and deferred errors | |
842 | */ | |
843 | int len = 8 + cmd->sense_buffer[7]; | |
844 | ||
845 | if (len > SCSI_SENSE_BUFFERSIZE) | |
846 | len = SCSI_SENSE_BUFFERSIZE; | |
847 | memcpy(req->sense, cmd->sense_buffer, len); | |
848 | req->sense_len = len; | |
849 | } | |
850 | } else | |
851 | req->data_len = cmd->resid; | |
852 | } | |
853 | ||
854 | /* | |
855 | * Zero these out. They now point to freed memory, and it is | |
856 | * dangerous to hang onto the pointers. | |
857 | */ | |
858 | cmd->buffer = NULL; | |
859 | cmd->bufflen = 0; | |
860 | cmd->request_buffer = NULL; | |
861 | cmd->request_bufflen = 0; | |
862 | ||
863 | /* | |
864 | * Next deal with any sectors which we were able to correctly | |
865 | * handle. | |
866 | */ | |
867 | if (good_bytes >= 0) { | |
868 | SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n", | |
869 | req->nr_sectors, good_bytes)); | |
870 | SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg)); | |
871 | ||
872 | if (clear_errors) | |
873 | req->errors = 0; | |
874 | /* | |
875 | * If multiple sectors are requested in one buffer, then | |
876 | * they will have been finished off by the first command. | |
877 | * If not, then we have a multi-buffer command. | |
878 | * | |
879 | * If block_bytes != 0, it means we had a medium error | |
880 | * of some sort, and that we want to mark some number of | |
881 | * sectors as not uptodate. Thus we want to inhibit | |
882 | * requeueing right here - we will requeue down below | |
883 | * when we handle the bad sectors. | |
884 | */ | |
1da177e4 LT |
885 | |
886 | /* | |
e91442b6 JB |
887 | * If the command completed without error, then either |
888 | * finish off the rest of the command, or start a new one. | |
1da177e4 | 889 | */ |
e91442b6 | 890 | if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL) |
1da177e4 | 891 | return; |
1da177e4 LT |
892 | } |
893 | /* | |
894 | * Now, if we were good little boys and girls, Santa left us a request | |
895 | * sense buffer. We can extract information from this, so we | |
896 | * can choose a block to remap, etc. | |
897 | */ | |
898 | if (sense_valid && !sense_deferred) { | |
899 | switch (sshdr.sense_key) { | |
900 | case UNIT_ATTENTION: | |
901 | if (cmd->device->removable) { | |
902 | /* detected disc change. set a bit | |
903 | * and quietly refuse further access. | |
904 | */ | |
905 | cmd->device->changed = 1; | |
e91442b6 | 906 | scsi_end_request(cmd, 0, |
1da177e4 LT |
907 | this_count, 1); |
908 | return; | |
909 | } else { | |
910 | /* | |
911 | * Must have been a power glitch, or a | |
912 | * bus reset. Could not have been a | |
913 | * media change, so we just retry the | |
914 | * request and see what happens. | |
915 | */ | |
916 | scsi_requeue_command(q, cmd); | |
917 | return; | |
918 | } | |
919 | break; | |
920 | case ILLEGAL_REQUEST: | |
921 | /* | |
922 | * If we had an ILLEGAL REQUEST returned, then we may | |
923 | * have performed an unsupported command. The only | |
924 | * thing this should be would be a ten byte read where | |
925 | * only a six byte read was supported. Also, on a | |
926 | * system where READ CAPACITY failed, we may have read | |
927 | * past the end of the disk. | |
928 | */ | |
929 | if (cmd->device->use_10_for_rw && | |
930 | (cmd->cmnd[0] == READ_10 || | |
931 | cmd->cmnd[0] == WRITE_10)) { | |
932 | cmd->device->use_10_for_rw = 0; | |
933 | /* | |
934 | * This will cause a retry with a 6-byte | |
935 | * command. | |
936 | */ | |
937 | scsi_requeue_command(q, cmd); | |
938 | result = 0; | |
939 | } else { | |
e91442b6 | 940 | scsi_end_request(cmd, 0, this_count, 1); |
1da177e4 LT |
941 | return; |
942 | } | |
943 | break; | |
944 | case NOT_READY: | |
945 | /* | |
946 | * If the device is in the process of becoming ready, | |
947 | * retry. | |
948 | */ | |
949 | if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) { | |
950 | scsi_requeue_command(q, cmd); | |
951 | return; | |
952 | } | |
3173d8c3 | 953 | if (!(req->flags & REQ_QUIET)) |
9ccfc756 JB |
954 | sdev_printk(KERN_INFO, |
955 | cmd->device, | |
956 | "Device not ready.\n"); | |
e91442b6 | 957 | scsi_end_request(cmd, 0, this_count, 1); |
1da177e4 LT |
958 | return; |
959 | case VOLUME_OVERFLOW: | |
3173d8c3 | 960 | if (!(req->flags & REQ_QUIET)) { |
9ccfc756 JB |
961 | sdev_printk(KERN_INFO, |
962 | cmd->device, | |
963 | "Volume overflow, CDB: "); | |
3173d8c3 JB |
964 | __scsi_print_command(cmd->data_cmnd); |
965 | scsi_print_sense("", cmd); | |
966 | } | |
e91442b6 | 967 | scsi_end_request(cmd, 0, block_bytes, 1); |
1da177e4 LT |
968 | return; |
969 | default: | |
970 | break; | |
971 | } | |
972 | } /* driver byte != 0 */ | |
973 | if (host_byte(result) == DID_RESET) { | |
974 | /* | |
975 | * Third party bus reset or reset for error | |
976 | * recovery reasons. Just retry the request | |
977 | * and see what happens. | |
978 | */ | |
979 | scsi_requeue_command(q, cmd); | |
980 | return; | |
981 | } | |
982 | if (result) { | |
3173d8c3 | 983 | if (!(req->flags & REQ_QUIET)) { |
9ccfc756 JB |
984 | sdev_printk(KERN_INFO, cmd->device, |
985 | "SCSI error: return code = 0x%x\n", | |
986 | result); | |
3173d8c3 JB |
987 | |
988 | if (driver_byte(result) & DRIVER_SENSE) | |
989 | scsi_print_sense("", cmd); | |
990 | } | |
1da177e4 LT |
991 | /* |
992 | * Mark a single buffer as not uptodate. Queue the remainder. | |
993 | * We sometimes get this cruft in the event that a medium error | |
994 | * isn't properly reported. | |
995 | */ | |
996 | block_bytes = req->hard_cur_sectors << 9; | |
997 | if (!block_bytes) | |
998 | block_bytes = req->data_len; | |
e91442b6 | 999 | scsi_end_request(cmd, 0, block_bytes, 1); |
1da177e4 LT |
1000 | } |
1001 | } | |
1002 | EXPORT_SYMBOL(scsi_io_completion); | |
1003 | ||
1004 | /* | |
1005 | * Function: scsi_init_io() | |
1006 | * | |
1007 | * Purpose: SCSI I/O initialize function. | |
1008 | * | |
1009 | * Arguments: cmd - Command descriptor we wish to initialize | |
1010 | * | |
1011 | * Returns: 0 on success | |
1012 | * BLKPREP_DEFER if the failure is retryable | |
1013 | * BLKPREP_KILL if the failure is fatal | |
1014 | */ | |
1015 | static int scsi_init_io(struct scsi_cmnd *cmd) | |
1016 | { | |
1017 | struct request *req = cmd->request; | |
1018 | struct scatterlist *sgpnt; | |
1019 | int count; | |
1020 | ||
1021 | /* | |
1022 | * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer | |
1023 | */ | |
1024 | if ((req->flags & REQ_BLOCK_PC) && !req->bio) { | |
1025 | cmd->request_bufflen = req->data_len; | |
1026 | cmd->request_buffer = req->data; | |
1027 | req->buffer = req->data; | |
1028 | cmd->use_sg = 0; | |
1029 | return 0; | |
1030 | } | |
1031 | ||
1032 | /* | |
1033 | * we used to not use scatter-gather for single segment request, | |
1034 | * but now we do (it makes highmem I/O easier to support without | |
1035 | * kmapping pages) | |
1036 | */ | |
1037 | cmd->use_sg = req->nr_phys_segments; | |
1038 | ||
1039 | /* | |
1040 | * if sg table allocation fails, requeue request later. | |
1041 | */ | |
1042 | sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC); | |
7c72ce81 AS |
1043 | if (unlikely(!sgpnt)) { |
1044 | scsi_unprep_request(req); | |
1da177e4 | 1045 | return BLKPREP_DEFER; |
7c72ce81 | 1046 | } |
1da177e4 LT |
1047 | |
1048 | cmd->request_buffer = (char *) sgpnt; | |
1049 | cmd->request_bufflen = req->nr_sectors << 9; | |
1050 | if (blk_pc_request(req)) | |
1051 | cmd->request_bufflen = req->data_len; | |
1052 | req->buffer = NULL; | |
1053 | ||
1054 | /* | |
1055 | * Next, walk the list, and fill in the addresses and sizes of | |
1056 | * each segment. | |
1057 | */ | |
1058 | count = blk_rq_map_sg(req->q, req, cmd->request_buffer); | |
1059 | ||
1060 | /* | |
1061 | * mapped well, send it off | |
1062 | */ | |
1063 | if (likely(count <= cmd->use_sg)) { | |
1064 | cmd->use_sg = count; | |
1065 | return 0; | |
1066 | } | |
1067 | ||
1068 | printk(KERN_ERR "Incorrect number of segments after building list\n"); | |
1069 | printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg); | |
1070 | printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors, | |
1071 | req->current_nr_sectors); | |
1072 | ||
1073 | /* release the command and kill it */ | |
1074 | scsi_release_buffers(cmd); | |
1075 | scsi_put_command(cmd); | |
1076 | return BLKPREP_KILL; | |
1077 | } | |
1078 | ||
1079 | static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq) | |
1080 | { | |
1081 | struct scsi_device *sdev = q->queuedata; | |
1082 | struct scsi_driver *drv; | |
1083 | ||
1084 | if (sdev->sdev_state == SDEV_RUNNING) { | |
1085 | drv = *(struct scsi_driver **) rq->rq_disk->private_data; | |
1086 | ||
1087 | if (drv->prepare_flush) | |
1088 | return drv->prepare_flush(q, rq); | |
1089 | } | |
1090 | ||
1091 | return 0; | |
1092 | } | |
1093 | ||
1094 | static void scsi_end_flush_fn(request_queue_t *q, struct request *rq) | |
1095 | { | |
1096 | struct scsi_device *sdev = q->queuedata; | |
1097 | struct request *flush_rq = rq->end_io_data; | |
1098 | struct scsi_driver *drv; | |
1099 | ||
1100 | if (flush_rq->errors) { | |
1101 | printk("scsi: barrier error, disabling flush support\n"); | |
1102 | blk_queue_ordered(q, QUEUE_ORDERED_NONE); | |
1103 | } | |
1104 | ||
1105 | if (sdev->sdev_state == SDEV_RUNNING) { | |
1106 | drv = *(struct scsi_driver **) rq->rq_disk->private_data; | |
1107 | drv->end_flush(q, rq); | |
1108 | } | |
1109 | } | |
1110 | ||
1111 | static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk, | |
1112 | sector_t *error_sector) | |
1113 | { | |
1114 | struct scsi_device *sdev = q->queuedata; | |
1115 | struct scsi_driver *drv; | |
1116 | ||
1117 | if (sdev->sdev_state != SDEV_RUNNING) | |
1118 | return -ENXIO; | |
1119 | ||
1120 | drv = *(struct scsi_driver **) disk->private_data; | |
1121 | if (drv->issue_flush) | |
1122 | return drv->issue_flush(&sdev->sdev_gendev, error_sector); | |
1123 | ||
1124 | return -EOPNOTSUPP; | |
1125 | } | |
1126 | ||
e537a36d JB |
1127 | static void scsi_generic_done(struct scsi_cmnd *cmd) |
1128 | { | |
1129 | BUG_ON(!blk_pc_request(cmd->request)); | |
1130 | scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0); | |
1131 | } | |
1132 | ||
1da177e4 LT |
1133 | static int scsi_prep_fn(struct request_queue *q, struct request *req) |
1134 | { | |
1135 | struct scsi_device *sdev = q->queuedata; | |
1136 | struct scsi_cmnd *cmd; | |
1137 | int specials_only = 0; | |
1138 | ||
1139 | /* | |
1140 | * Just check to see if the device is online. If it isn't, we | |
1141 | * refuse to process any commands. The device must be brought | |
1142 | * online before trying any recovery commands | |
1143 | */ | |
1144 | if (unlikely(!scsi_device_online(sdev))) { | |
9ccfc756 JB |
1145 | sdev_printk(KERN_ERR, sdev, |
1146 | "rejecting I/O to offline device\n"); | |
6f16b535 | 1147 | goto kill; |
1da177e4 LT |
1148 | } |
1149 | if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { | |
1150 | /* OK, we're not in a running state don't prep | |
1151 | * user commands */ | |
1152 | if (sdev->sdev_state == SDEV_DEL) { | |
1153 | /* Device is fully deleted, no commands | |
1154 | * at all allowed down */ | |
9ccfc756 JB |
1155 | sdev_printk(KERN_ERR, sdev, |
1156 | "rejecting I/O to dead device\n"); | |
6f16b535 | 1157 | goto kill; |
1da177e4 LT |
1158 | } |
1159 | /* OK, we only allow special commands (i.e. not | |
1160 | * user initiated ones */ | |
1161 | specials_only = sdev->sdev_state; | |
1162 | } | |
1163 | ||
1164 | /* | |
1165 | * Find the actual device driver associated with this command. | |
1166 | * The SPECIAL requests are things like character device or | |
1167 | * ioctls, which did not originate from ll_rw_blk. Note that | |
1168 | * the special field is also used to indicate the cmd for | |
1169 | * the remainder of a partially fulfilled request that can | |
1170 | * come up when there is a medium error. We have to treat | |
1171 | * these two cases differently. We differentiate by looking | |
1172 | * at request->cmd, as this tells us the real story. | |
1173 | */ | |
e537a36d | 1174 | if (req->flags & REQ_SPECIAL && req->special) { |
1da177e4 LT |
1175 | struct scsi_request *sreq = req->special; |
1176 | ||
1177 | if (sreq->sr_magic == SCSI_REQ_MAGIC) { | |
1178 | cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC); | |
1179 | if (unlikely(!cmd)) | |
1180 | goto defer; | |
1181 | scsi_init_cmd_from_req(cmd, sreq); | |
1182 | } else | |
1183 | cmd = req->special; | |
1184 | } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { | |
1185 | ||
e537a36d | 1186 | if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) { |
1da177e4 LT |
1187 | if(specials_only == SDEV_QUIESCE || |
1188 | specials_only == SDEV_BLOCK) | |
6f16b535 | 1189 | goto defer; |
1da177e4 | 1190 | |
9ccfc756 JB |
1191 | sdev_printk(KERN_ERR, sdev, |
1192 | "rejecting I/O to device being removed\n"); | |
6f16b535 | 1193 | goto kill; |
1da177e4 LT |
1194 | } |
1195 | ||
1196 | ||
1197 | /* | |
1198 | * Now try and find a command block that we can use. | |
1199 | */ | |
1200 | if (!req->special) { | |
1201 | cmd = scsi_get_command(sdev, GFP_ATOMIC); | |
1202 | if (unlikely(!cmd)) | |
1203 | goto defer; | |
1204 | } else | |
1205 | cmd = req->special; | |
1206 | ||
1207 | /* pull a tag out of the request if we have one */ | |
1208 | cmd->tag = req->tag; | |
1209 | } else { | |
1210 | blk_dump_rq_flags(req, "SCSI bad req"); | |
6f16b535 | 1211 | goto kill; |
1da177e4 LT |
1212 | } |
1213 | ||
1214 | /* note the overloading of req->special. When the tag | |
1215 | * is active it always means cmd. If the tag goes | |
1216 | * back for re-queueing, it may be reset */ | |
1217 | req->special = cmd; | |
1218 | cmd->request = req; | |
1219 | ||
1220 | /* | |
1221 | * FIXME: drop the lock here because the functions below | |
1222 | * expect to be called without the queue lock held. Also, | |
1223 | * previously, we dequeued the request before dropping the | |
1224 | * lock. We hope REQ_STARTED prevents anything untoward from | |
1225 | * happening now. | |
1226 | */ | |
1227 | if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { | |
1228 | struct scsi_driver *drv; | |
1229 | int ret; | |
1230 | ||
1231 | /* | |
1232 | * This will do a couple of things: | |
1233 | * 1) Fill in the actual SCSI command. | |
1234 | * 2) Fill in any other upper-level specific fields | |
1235 | * (timeout). | |
1236 | * | |
1237 | * If this returns 0, it means that the request failed | |
1238 | * (reading past end of disk, reading offline device, | |
1239 | * etc). This won't actually talk to the device, but | |
1240 | * some kinds of consistency checking may cause the | |
1241 | * request to be rejected immediately. | |
1242 | */ | |
1243 | ||
1244 | /* | |
1245 | * This sets up the scatter-gather table (allocating if | |
1246 | * required). | |
1247 | */ | |
1248 | ret = scsi_init_io(cmd); | |
6f16b535 | 1249 | switch(ret) { |
7c72ce81 | 1250 | /* For BLKPREP_KILL/DEFER the cmd was released */ |
6f16b535 | 1251 | case BLKPREP_KILL: |
6f16b535 MC |
1252 | goto kill; |
1253 | case BLKPREP_DEFER: | |
1254 | goto defer; | |
1255 | } | |
1da177e4 LT |
1256 | |
1257 | /* | |
1258 | * Initialize the actual SCSI command for this request. | |
1259 | */ | |
e537a36d JB |
1260 | if (req->rq_disk) { |
1261 | drv = *(struct scsi_driver **)req->rq_disk->private_data; | |
1262 | if (unlikely(!drv->init_command(cmd))) { | |
1263 | scsi_release_buffers(cmd); | |
1264 | scsi_put_command(cmd); | |
6f16b535 | 1265 | goto kill; |
e537a36d JB |
1266 | } |
1267 | } else { | |
1268 | memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd)); | |
186d330e | 1269 | cmd->cmd_len = req->cmd_len; |
e537a36d JB |
1270 | if (rq_data_dir(req) == WRITE) |
1271 | cmd->sc_data_direction = DMA_TO_DEVICE; | |
1272 | else if (req->data_len) | |
1273 | cmd->sc_data_direction = DMA_FROM_DEVICE; | |
1274 | else | |
1275 | cmd->sc_data_direction = DMA_NONE; | |
1276 | ||
1277 | cmd->transfersize = req->data_len; | |
1278 | cmd->allowed = 3; | |
1279 | cmd->timeout_per_command = req->timeout; | |
1280 | cmd->done = scsi_generic_done; | |
1da177e4 LT |
1281 | } |
1282 | } | |
1283 | ||
1284 | /* | |
1285 | * The request is now prepped, no need to come back here | |
1286 | */ | |
1287 | req->flags |= REQ_DONTPREP; | |
1288 | return BLKPREP_OK; | |
1289 | ||
1290 | defer: | |
1291 | /* If we defer, the elv_next_request() returns NULL, but the | |
1292 | * queue must be restarted, so we plug here if no returning | |
1293 | * command will automatically do that. */ | |
1294 | if (sdev->device_busy == 0) | |
1295 | blk_plug_device(q); | |
1296 | return BLKPREP_DEFER; | |
6f16b535 MC |
1297 | kill: |
1298 | req->errors = DID_NO_CONNECT << 16; | |
1299 | return BLKPREP_KILL; | |
1da177e4 LT |
1300 | } |
1301 | ||
1302 | /* | |
1303 | * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else | |
1304 | * return 0. | |
1305 | * | |
1306 | * Called with the queue_lock held. | |
1307 | */ | |
1308 | static inline int scsi_dev_queue_ready(struct request_queue *q, | |
1309 | struct scsi_device *sdev) | |
1310 | { | |
1311 | if (sdev->device_busy >= sdev->queue_depth) | |
1312 | return 0; | |
1313 | if (sdev->device_busy == 0 && sdev->device_blocked) { | |
1314 | /* | |
1315 | * unblock after device_blocked iterates to zero | |
1316 | */ | |
1317 | if (--sdev->device_blocked == 0) { | |
1318 | SCSI_LOG_MLQUEUE(3, | |
9ccfc756 JB |
1319 | sdev_printk(KERN_INFO, sdev, |
1320 | "unblocking device at zero depth\n")); | |
1da177e4 LT |
1321 | } else { |
1322 | blk_plug_device(q); | |
1323 | return 0; | |
1324 | } | |
1325 | } | |
1326 | if (sdev->device_blocked) | |
1327 | return 0; | |
1328 | ||
1329 | return 1; | |
1330 | } | |
1331 | ||
1332 | /* | |
1333 | * scsi_host_queue_ready: if we can send requests to shost, return 1 else | |
1334 | * return 0. We must end up running the queue again whenever 0 is | |
1335 | * returned, else IO can hang. | |
1336 | * | |
1337 | * Called with host_lock held. | |
1338 | */ | |
1339 | static inline int scsi_host_queue_ready(struct request_queue *q, | |
1340 | struct Scsi_Host *shost, | |
1341 | struct scsi_device *sdev) | |
1342 | { | |
939647ee | 1343 | if (scsi_host_in_recovery(shost)) |
1da177e4 LT |
1344 | return 0; |
1345 | if (shost->host_busy == 0 && shost->host_blocked) { | |
1346 | /* | |
1347 | * unblock after host_blocked iterates to zero | |
1348 | */ | |
1349 | if (--shost->host_blocked == 0) { | |
1350 | SCSI_LOG_MLQUEUE(3, | |
1351 | printk("scsi%d unblocking host at zero depth\n", | |
1352 | shost->host_no)); | |
1353 | } else { | |
1354 | blk_plug_device(q); | |
1355 | return 0; | |
1356 | } | |
1357 | } | |
1358 | if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) || | |
1359 | shost->host_blocked || shost->host_self_blocked) { | |
1360 | if (list_empty(&sdev->starved_entry)) | |
1361 | list_add_tail(&sdev->starved_entry, &shost->starved_list); | |
1362 | return 0; | |
1363 | } | |
1364 | ||
1365 | /* We're OK to process the command, so we can't be starved */ | |
1366 | if (!list_empty(&sdev->starved_entry)) | |
1367 | list_del_init(&sdev->starved_entry); | |
1368 | ||
1369 | return 1; | |
1370 | } | |
1371 | ||
1372 | /* | |
e91442b6 | 1373 | * Kill a request for a dead device |
1da177e4 | 1374 | */ |
e91442b6 | 1375 | static void scsi_kill_request(struct request *req, request_queue_t *q) |
1da177e4 | 1376 | { |
e91442b6 | 1377 | struct scsi_cmnd *cmd = req->special; |
1da177e4 | 1378 | |
788ce43a JB |
1379 | blkdev_dequeue_request(req); |
1380 | ||
e91442b6 JB |
1381 | if (unlikely(cmd == NULL)) { |
1382 | printk(KERN_CRIT "impossible request in %s.\n", | |
1383 | __FUNCTION__); | |
1384 | BUG(); | |
1da177e4 | 1385 | } |
e91442b6 JB |
1386 | |
1387 | scsi_init_cmd_errh(cmd); | |
1388 | cmd->result = DID_NO_CONNECT << 16; | |
1389 | atomic_inc(&cmd->device->iorequest_cnt); | |
1390 | __scsi_done(cmd); | |
1da177e4 LT |
1391 | } |
1392 | ||
1393 | /* | |
1394 | * Function: scsi_request_fn() | |
1395 | * | |
1396 | * Purpose: Main strategy routine for SCSI. | |
1397 | * | |
1398 | * Arguments: q - Pointer to actual queue. | |
1399 | * | |
1400 | * Returns: Nothing | |
1401 | * | |
1402 | * Lock status: IO request lock assumed to be held when called. | |
1403 | */ | |
1404 | static void scsi_request_fn(struct request_queue *q) | |
1405 | { | |
1406 | struct scsi_device *sdev = q->queuedata; | |
1407 | struct Scsi_Host *shost; | |
1408 | struct scsi_cmnd *cmd; | |
1409 | struct request *req; | |
1410 | ||
1411 | if (!sdev) { | |
1412 | printk("scsi: killing requests for dead queue\n"); | |
e91442b6 JB |
1413 | while ((req = elv_next_request(q)) != NULL) |
1414 | scsi_kill_request(req, q); | |
1da177e4 LT |
1415 | return; |
1416 | } | |
1417 | ||
1418 | if(!get_device(&sdev->sdev_gendev)) | |
1419 | /* We must be tearing the block queue down already */ | |
1420 | return; | |
1421 | ||
1422 | /* | |
1423 | * To start with, we keep looping until the queue is empty, or until | |
1424 | * the host is no longer able to accept any more requests. | |
1425 | */ | |
1426 | shost = sdev->host; | |
1427 | while (!blk_queue_plugged(q)) { | |
1428 | int rtn; | |
1429 | /* | |
1430 | * get next queueable request. We do this early to make sure | |
1431 | * that the request is fully prepared even if we cannot | |
1432 | * accept it. | |
1433 | */ | |
1434 | req = elv_next_request(q); | |
1435 | if (!req || !scsi_dev_queue_ready(q, sdev)) | |
1436 | break; | |
1437 | ||
1438 | if (unlikely(!scsi_device_online(sdev))) { | |
9ccfc756 JB |
1439 | sdev_printk(KERN_ERR, sdev, |
1440 | "rejecting I/O to offline device\n"); | |
e91442b6 | 1441 | scsi_kill_request(req, q); |
1da177e4 LT |
1442 | continue; |
1443 | } | |
1444 | ||
1445 | ||
1446 | /* | |
1447 | * Remove the request from the request list. | |
1448 | */ | |
1449 | if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) | |
1450 | blkdev_dequeue_request(req); | |
1451 | sdev->device_busy++; | |
1452 | ||
1453 | spin_unlock(q->queue_lock); | |
e91442b6 JB |
1454 | cmd = req->special; |
1455 | if (unlikely(cmd == NULL)) { | |
1456 | printk(KERN_CRIT "impossible request in %s.\n" | |
1457 | "please mail a stack trace to " | |
1458 | "linux-scsi@vger.kernel.org", | |
1459 | __FUNCTION__); | |
1460 | BUG(); | |
1461 | } | |
1da177e4 LT |
1462 | spin_lock(shost->host_lock); |
1463 | ||
1464 | if (!scsi_host_queue_ready(q, shost, sdev)) | |
1465 | goto not_ready; | |
1466 | if (sdev->single_lun) { | |
1467 | if (scsi_target(sdev)->starget_sdev_user && | |
1468 | scsi_target(sdev)->starget_sdev_user != sdev) | |
1469 | goto not_ready; | |
1470 | scsi_target(sdev)->starget_sdev_user = sdev; | |
1471 | } | |
1472 | shost->host_busy++; | |
1473 | ||
1474 | /* | |
1475 | * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will | |
1476 | * take the lock again. | |
1477 | */ | |
1478 | spin_unlock_irq(shost->host_lock); | |
1479 | ||
1da177e4 LT |
1480 | /* |
1481 | * Finally, initialize any error handling parameters, and set up | |
1482 | * the timers for timeouts. | |
1483 | */ | |
1484 | scsi_init_cmd_errh(cmd); | |
1485 | ||
1486 | /* | |
1487 | * Dispatch the command to the low-level driver. | |
1488 | */ | |
1489 | rtn = scsi_dispatch_cmd(cmd); | |
1490 | spin_lock_irq(q->queue_lock); | |
1491 | if(rtn) { | |
1492 | /* we're refusing the command; because of | |
1493 | * the way locks get dropped, we need to | |
1494 | * check here if plugging is required */ | |
1495 | if(sdev->device_busy == 0) | |
1496 | blk_plug_device(q); | |
1497 | ||
1498 | break; | |
1499 | } | |
1500 | } | |
1501 | ||
1502 | goto out; | |
1503 | ||
1504 | not_ready: | |
1505 | spin_unlock_irq(shost->host_lock); | |
1506 | ||
1507 | /* | |
1508 | * lock q, handle tag, requeue req, and decrement device_busy. We | |
1509 | * must return with queue_lock held. | |
1510 | * | |
1511 | * Decrementing device_busy without checking it is OK, as all such | |
1512 | * cases (host limits or settings) should run the queue at some | |
1513 | * later time. | |
1514 | */ | |
1515 | spin_lock_irq(q->queue_lock); | |
1516 | blk_requeue_request(q, req); | |
1517 | sdev->device_busy--; | |
1518 | if(sdev->device_busy == 0) | |
1519 | blk_plug_device(q); | |
1520 | out: | |
1521 | /* must be careful here...if we trigger the ->remove() function | |
1522 | * we cannot be holding the q lock */ | |
1523 | spin_unlock_irq(q->queue_lock); | |
1524 | put_device(&sdev->sdev_gendev); | |
1525 | spin_lock_irq(q->queue_lock); | |
1526 | } | |
1527 | ||
1528 | u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) | |
1529 | { | |
1530 | struct device *host_dev; | |
1531 | u64 bounce_limit = 0xffffffff; | |
1532 | ||
1533 | if (shost->unchecked_isa_dma) | |
1534 | return BLK_BOUNCE_ISA; | |
1535 | /* | |
1536 | * Platforms with virtual-DMA translation | |
1537 | * hardware have no practical limit. | |
1538 | */ | |
1539 | if (!PCI_DMA_BUS_IS_PHYS) | |
1540 | return BLK_BOUNCE_ANY; | |
1541 | ||
1542 | host_dev = scsi_get_device(shost); | |
1543 | if (host_dev && host_dev->dma_mask) | |
1544 | bounce_limit = *host_dev->dma_mask; | |
1545 | ||
1546 | return bounce_limit; | |
1547 | } | |
1548 | EXPORT_SYMBOL(scsi_calculate_bounce_limit); | |
1549 | ||
1550 | struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) | |
1551 | { | |
1552 | struct Scsi_Host *shost = sdev->host; | |
1553 | struct request_queue *q; | |
1554 | ||
152587de | 1555 | q = blk_init_queue(scsi_request_fn, NULL); |
1da177e4 LT |
1556 | if (!q) |
1557 | return NULL; | |
1558 | ||
1559 | blk_queue_prep_rq(q, scsi_prep_fn); | |
1560 | ||
1561 | blk_queue_max_hw_segments(q, shost->sg_tablesize); | |
1562 | blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS); | |
1563 | blk_queue_max_sectors(q, shost->max_sectors); | |
1564 | blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); | |
1565 | blk_queue_segment_boundary(q, shost->dma_boundary); | |
1566 | blk_queue_issue_flush_fn(q, scsi_issue_flush_fn); | |
1567 | ||
1568 | /* | |
1569 | * ordered tags are superior to flush ordering | |
1570 | */ | |
1571 | if (shost->ordered_tag) | |
1572 | blk_queue_ordered(q, QUEUE_ORDERED_TAG); | |
1573 | else if (shost->ordered_flush) { | |
1574 | blk_queue_ordered(q, QUEUE_ORDERED_FLUSH); | |
1575 | q->prepare_flush_fn = scsi_prepare_flush_fn; | |
1576 | q->end_flush_fn = scsi_end_flush_fn; | |
1577 | } | |
1578 | ||
1579 | if (!shost->use_clustering) | |
1580 | clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); | |
1581 | return q; | |
1582 | } | |
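/*
 * Illustrative sketch, not part of this file: the queue limits applied in
 * scsi_alloc_queue() come from fields the LLD fills in on its host
 * template.  "mydrv" and its queuecommand stub are hypothetical; a real
 * driver would start I/O in queuecommand.
 */
static int mydrv_queuecommand(struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *))
{
	/* stub for illustration only: fail everything immediately */
	cmd->result = DID_NO_CONNECT << 16;
	done(cmd);
	return 0;
}

static struct scsi_host_template mydrv_template = {
	.module		= THIS_MODULE,
	.name		= "mydrv",
	.queuecommand	= mydrv_queuecommand,
	.sg_tablesize	= 64,		/* feeds blk_queue_max_hw_segments() */
	.max_sectors	= 256,		/* feeds blk_queue_max_sectors() */
	.dma_boundary	= 0xffffffff,	/* feeds blk_queue_segment_boundary() */
	.use_clustering	= ENABLE_CLUSTERING,
};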
1583 | ||
1584 | void scsi_free_queue(struct request_queue *q) | |
1585 | { | |
1586 | blk_cleanup_queue(q); | |
1587 | } | |
1588 | ||
1589 | /* | |
1590 | * Function: scsi_block_requests() | |
1591 | * | |
1592 | * Purpose: Utility function used by low-level drivers to prevent further | |
1593 | * commands from being queued to the device. | |
1594 | * | |
1595 | * Arguments: shost - Host in question | |
1596 | * | |
1597 | * Returns: Nothing | |
1598 | * | |
1599 | * Lock status: No locks are assumed held. | |
1600 | * | |
1601 | * Notes: There is no timer nor any other means by which the requests | |
1602 | * get unblocked other than the low-level driver calling | |
1603 | * scsi_unblock_requests(). | |
1604 | */ | |
1605 | void scsi_block_requests(struct Scsi_Host *shost) | |
1606 | { | |
1607 | shost->host_self_blocked = 1; | |
1608 | } | |
1609 | EXPORT_SYMBOL(scsi_block_requests); | |
1610 | ||
1611 | /* | |
1612 | * Function: scsi_unblock_requests() | |
1613 | * | |
1614 | * Purpose: Utility function used by low-level drivers to allow further | |
1615 | * commands to be queued to the device. |
1616 | * | |
1617 | * Arguments: shost - Host in question | |
1618 | * | |
1619 | * Returns: Nothing | |
1620 | * | |
1621 | * Lock status: No locks are assumed held. | |
1622 | * | |
1623 | * Notes: There is no timer nor any other means by which the requests | |
1624 | * get unblocked other than the low-level driver calling | |
1625 | * scsi_unblock_requests(). | |
1626 | * | |
1627 | * This is done as an API function so that changes to the | |
1628 | * internals of the scsi mid-layer won't require wholesale | |
1629 | * changes to drivers that use this feature. | |
1630 | */ | |
1631 | void scsi_unblock_requests(struct Scsi_Host *shost) | |
1632 | { | |
1633 | shost->host_self_blocked = 0; | |
1634 | scsi_run_host_queues(shost); | |
1635 | } | |
1636 | EXPORT_SYMBOL(scsi_unblock_requests); | |
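/*
 * Illustrative sketch, not part of this file: a typical LLD use of the
 * pair above is to bracket a controller reset so the midlayer queues no
 * new commands while the hardware reinitialises.  "mydrv_reset_hardware"
 * is a hypothetical driver-private helper.
 */
static int mydrv_host_reset(struct Scsi_Host *shost)
{
	int err;

	scsi_block_requests(shost);		/* no new commands */
	err = mydrv_reset_hardware(shost);	/* driver-specific reset */
	scsi_unblock_requests(shost);		/* restart all queues */

	return err;
}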
1637 | ||
1638 | int __init scsi_init_queue(void) | |
1639 | { | |
1640 | int i; | |
1641 | ||
1642 | for (i = 0; i < SG_MEMPOOL_NR; i++) { | |
1643 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | |
1644 | int size = sgp->size * sizeof(struct scatterlist); | |
1645 | ||
1646 | sgp->slab = kmem_cache_create(sgp->name, size, 0, | |
1647 | SLAB_HWCACHE_ALIGN, NULL, NULL); | |
1648 | if (!sgp->slab) { | |
1649 | printk(KERN_ERR "SCSI: can't init sg slab %s\n", | |
1650 | sgp->name); | |
1651 | } | |
1652 | ||
1653 | sgp->pool = mempool_create(SG_MEMPOOL_SIZE, | |
1654 | mempool_alloc_slab, mempool_free_slab, | |
1655 | sgp->slab); | |
1656 | if (!sgp->pool) { | |
1657 | printk(KERN_ERR "SCSI: can't init sg mempool %s\n", | |
1658 | sgp->name); | |
1659 | } | |
1660 | } | |
1661 | ||
1662 | return 0; | |
1663 | } | |
1664 | ||
1665 | void scsi_exit_queue(void) | |
1666 | { | |
1667 | int i; | |
1668 | ||
1669 | for (i = 0; i < SG_MEMPOOL_NR; i++) { | |
1670 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | |
1671 | mempool_destroy(sgp->pool); | |
1672 | kmem_cache_destroy(sgp->slab); | |
1673 | } | |
1674 | } | |
1675 | /** | |
ea73a9f2 | 1676 | * scsi_mode_sense - issue a mode sense, falling back from ten to |
1da177e4 | 1677 | * six bytes if necessary. |
1cf72699 | 1678 | * @sdev: SCSI device to be queried |
1da177e4 LT |
1679 | * @dbd: set if mode sense will allow block descriptors to be returned |
1680 | * @modepage: mode page being requested | |
1681 | * @buffer: request buffer (may not be smaller than eight bytes) | |
1682 | * @len: length of request buffer. | |
1683 | * @timeout: command timeout | |
1684 | * @retries: number of retries before failing | |
1685 | * @data: returns a structure abstracting the mode header data | |
1cf72699 JB |
1686 | * @sense: place to put sense data (or NULL if no sense to be collected). |
1687 | * must be SCSI_SENSE_BUFFERSIZE big. | |
1da177e4 LT |
1688 | * |
1689 | * Returns zero (GOOD status) if successful, or a non-zero SCSI |
1690 | * result code otherwise. On success, the header length (4 or 8, |
1691 | * for a six or ten byte command) is returned in @data->header_length. |
1692 | **/ | |
1693 | int | |
1cf72699 | 1694 | scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, |
1da177e4 | 1695 | unsigned char *buffer, int len, int timeout, int retries, |
ea73a9f2 | 1696 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) { |
1da177e4 LT |
1697 | unsigned char cmd[12]; |
1698 | int use_10_for_ms; | |
1699 | int header_length; | |
1cf72699 | 1700 | int result; |
ea73a9f2 | 1701 | struct scsi_sense_hdr my_sshdr; |
1da177e4 LT |
1702 | |
1703 | memset(data, 0, sizeof(*data)); | |
1704 | memset(&cmd[0], 0, 12); | |
1705 | cmd[1] = dbd & 0x18; /* allows DBD and LLBAA bits */ |
1706 | cmd[2] = modepage; | |
1707 | ||
ea73a9f2 JB |
1708 | /* caller might not be interested in sense, but we need it */ |
1709 | if (!sshdr) | |
1710 | sshdr = &my_sshdr; | |
1711 | ||
1da177e4 | 1712 | retry: |
1cf72699 | 1713 | use_10_for_ms = sdev->use_10_for_ms; |
1da177e4 LT |
1714 | |
1715 | if (use_10_for_ms) { | |
1716 | if (len < 8) | |
1717 | len = 8; | |
1718 | ||
1719 | cmd[0] = MODE_SENSE_10; | |
1720 | cmd[8] = len; | |
1721 | header_length = 8; | |
1722 | } else { | |
1723 | if (len < 4) | |
1724 | len = 4; | |
1725 | ||
1726 | cmd[0] = MODE_SENSE; | |
1727 | cmd[4] = len; | |
1728 | header_length = 4; | |
1729 | } | |
1730 | ||
1da177e4 LT |
1731 | memset(buffer, 0, len); |
1732 | ||
1cf72699 | 1733 | result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, |
ea73a9f2 | 1734 | sshdr, timeout, retries); |
1da177e4 LT |
1735 | |
1736 | /* This code looks awful: what it's doing is making sure an | |
1737 | * ILLEGAL REQUEST sense return identifies the actual command | |
1738 | * byte as the problem. MODE_SENSE commands can return | |
1739 | * ILLEGAL REQUEST if the code page isn't supported */ | |
1740 | ||
1cf72699 JB |
1741 | if (use_10_for_ms && !scsi_status_is_good(result) && |
1742 | (driver_byte(result) & DRIVER_SENSE)) { | |
ea73a9f2 JB |
1743 | if (scsi_sense_valid(sshdr)) { |
1744 | if ((sshdr->sense_key == ILLEGAL_REQUEST) && | |
1745 | (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { | |
1da177e4 LT |
1746 | /* |
1747 | * Invalid command operation code | |
1748 | */ | |
1cf72699 | 1749 | sdev->use_10_for_ms = 0; |
1da177e4 LT |
1750 | goto retry; |
1751 | } | |
1752 | } | |
1753 | } | |
1754 | ||
1cf72699 | 1755 | if (scsi_status_is_good(result)) { |
1da177e4 LT |
1756 | data->header_length = header_length; |
1757 | if (use_10_for_ms) { |
1758 | data->length = buffer[0]*256 + buffer[1] + 2; | |
1759 | data->medium_type = buffer[2]; | |
1760 | data->device_specific = buffer[3]; | |
1761 | data->longlba = buffer[4] & 0x01; | |
1762 | data->block_descriptor_length = buffer[6]*256 | |
1763 | + buffer[7]; | |
1764 | } else { | |
1765 | data->length = buffer[0] + 1; | |
1766 | data->medium_type = buffer[1]; | |
1767 | data->device_specific = buffer[2]; | |
1768 | data->block_descriptor_length = buffer[3]; | |
1769 | } | |
1770 | } | |
1771 | ||
1cf72699 | 1772 | return result; |
1da177e4 LT |
1773 | } |
1774 | EXPORT_SYMBOL(scsi_mode_sense); | |
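/*
 * Illustrative sketch, not part of this file: a caller in the style of
 * sd's cache probing fetches the caching mode page (0x08) and tests the
 * WCE bit.  The buffer size, timeout and retry count are assumptions.
 */
static int mydrv_cache_enabled(struct scsi_device *sdev)
{
	unsigned char buffer[128];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res, offset;

	res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
			      30 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* page data begins after the mode header and block descriptors */
	offset = data.header_length + data.block_descriptor_length;
	return (buffer[offset + 2] & 0x04) ? 1 : 0;	/* WCE bit */
}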
1775 | ||
1776 | int | |
1777 | scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries) | |
1778 | { | |
1da177e4 LT |
1779 | char cmd[] = { |
1780 | TEST_UNIT_READY, 0, 0, 0, 0, 0, | |
1781 | }; | |
ea73a9f2 | 1782 | struct scsi_sense_hdr sshdr; |
1da177e4 LT |
1783 | int result; |
1784 | ||
ea73a9f2 | 1785 | result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, |
1cf72699 | 1786 | timeout, retries); |
1da177e4 | 1787 | |
1cf72699 | 1788 | if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) { |
1da177e4 | 1789 | |
ea73a9f2 | 1790 | if ((scsi_sense_valid(&sshdr)) && |
1da177e4 LT |
1791 | ((sshdr.sense_key == UNIT_ATTENTION) || |
1792 | (sshdr.sense_key == NOT_READY))) { | |
1793 | sdev->changed = 1; | |
1cf72699 | 1794 | result = 0; |
1da177e4 LT |
1795 | } |
1796 | } | |
1da177e4 LT |
1797 | return result; |
1798 | } | |
1799 | EXPORT_SYMBOL(scsi_test_unit_ready); | |
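/*
 * Illustrative sketch, not part of this file: polling a removable device
 * until it reports ready, much as the sd/sr media checks do.  The retry
 * counts and sleep interval are assumptions.
 */
static int mydrv_wait_for_media(struct scsi_device *sdev)
{
	int tries;

	for (tries = 0; tries < 5; tries++) {
		/* zero means GOOD status: the unit is ready */
		if (scsi_test_unit_ready(sdev, 30 * HZ, 3) == 0)
			return 0;
		msleep(1000);
	}
	return -ENOMEDIUM;
}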
1800 | ||
1801 | /** | |
1802 | * scsi_device_set_state - Take the given device through the device | |
1803 | * state model. | |
1804 | * @sdev: scsi device to change the state of. | |
1805 | * @state: state to change to. | |
1806 | * | |
1807 | * Returns zero if successful or an error if the requested |
1808 | * transition is illegal. | |
1809 | **/ | |
1810 | int | |
1811 | scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) | |
1812 | { | |
1813 | enum scsi_device_state oldstate = sdev->sdev_state; | |
1814 | ||
1815 | if (state == oldstate) | |
1816 | return 0; | |
1817 | ||
1818 | switch (state) { | |
1819 | case SDEV_CREATED: | |
1820 | /* There are no legal states that come back to | |
1821 | * created. This is the manually initialised start | |
1822 | * state */ | |
1823 | goto illegal; | |
1824 | ||
1825 | case SDEV_RUNNING: | |
1826 | switch (oldstate) { | |
1827 | case SDEV_CREATED: | |
1828 | case SDEV_OFFLINE: | |
1829 | case SDEV_QUIESCE: | |
1830 | case SDEV_BLOCK: | |
1831 | break; | |
1832 | default: | |
1833 | goto illegal; | |
1834 | } | |
1835 | break; | |
1836 | ||
1837 | case SDEV_QUIESCE: | |
1838 | switch (oldstate) { | |
1839 | case SDEV_RUNNING: | |
1840 | case SDEV_OFFLINE: | |
1841 | break; | |
1842 | default: | |
1843 | goto illegal; | |
1844 | } | |
1845 | break; | |
1846 | ||
1847 | case SDEV_OFFLINE: | |
1848 | switch (oldstate) { | |
1849 | case SDEV_CREATED: | |
1850 | case SDEV_RUNNING: | |
1851 | case SDEV_QUIESCE: | |
1852 | case SDEV_BLOCK: | |
1853 | break; | |
1854 | default: | |
1855 | goto illegal; | |
1856 | } | |
1857 | break; | |
1858 | ||
1859 | case SDEV_BLOCK: | |
1860 | switch (oldstate) { | |
1861 | case SDEV_CREATED: | |
1862 | case SDEV_RUNNING: | |
1863 | break; | |
1864 | default: | |
1865 | goto illegal; | |
1866 | } | |
1867 | break; | |
1868 | ||
1869 | case SDEV_CANCEL: | |
1870 | switch (oldstate) { | |
1871 | case SDEV_CREATED: | |
1872 | case SDEV_RUNNING: | |
1873 | case SDEV_OFFLINE: | |
1874 | case SDEV_BLOCK: | |
1875 | break; | |
1876 | default: | |
1877 | goto illegal; | |
1878 | } | |
1879 | break; | |
1880 | ||
1881 | case SDEV_DEL: | |
1882 | switch (oldstate) { | |
1883 | case SDEV_CANCEL: | |
1884 | break; | |
1885 | default: | |
1886 | goto illegal; | |
1887 | } | |
1888 | break; | |
1889 | ||
1890 | } | |
1891 | sdev->sdev_state = state; | |
1892 | return 0; | |
1893 | ||
1894 | illegal: | |
1895 | SCSI_LOG_ERROR_RECOVERY(1, | |
9ccfc756 JB |
1896 | sdev_printk(KERN_ERR, sdev, |
1897 | "Illegal state transition %s->%s\n", | |
1898 | scsi_device_state_name(oldstate), | |
1899 | scsi_device_state_name(state)) | |
1da177e4 LT |
1900 | ); |
1901 | return -EINVAL; | |
1902 | } | |
1903 | EXPORT_SYMBOL(scsi_device_set_state); | |
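/*
 * Illustrative sketch, not part of this file: the transition table above
 * only reaches SDEV_DEL via SDEV_CANCEL, so a removal path steps through
 * both states.  Locking and error handling are elided for brevity.
 */
static void mydrv_begin_remove(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_CANCEL) == 0)
		scsi_device_set_state(sdev, SDEV_DEL);
}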
1904 | ||
1905 | /** | |
1906 | * scsi_device_quiesce - Block user issued commands. | |
1907 | * @sdev: scsi device to quiesce. | |
1908 | * | |
1909 | * This works by trying to transition to the SDEV_QUIESCE state | |
1910 | * (which must be a legal transition). When the device is in this | |
1911 | * state, only special requests will be accepted, all others will | |
1912 | * be deferred. Since special requests may also be requeued requests, | |
1913 | * a successful return doesn't guarantee the device will be | |
1914 | * totally quiescent. | |
1915 | * | |
1916 | * Must be called with user context, may sleep. | |
1917 | * | |
1918 | * Returns zero if successful or an error if not. |
1919 | **/ | |
1920 | int | |
1921 | scsi_device_quiesce(struct scsi_device *sdev) | |
1922 | { | |
1923 | int err = scsi_device_set_state(sdev, SDEV_QUIESCE); | |
1924 | if (err) | |
1925 | return err; | |
1926 | ||
1927 | scsi_run_queue(sdev->request_queue); | |
1928 | while (sdev->device_busy) { | |
1929 | msleep_interruptible(200); | |
1930 | scsi_run_queue(sdev->request_queue); | |
1931 | } | |
1932 | return 0; | |
1933 | } | |
1934 | EXPORT_SYMBOL(scsi_device_quiesce); | |
1935 | ||
1936 | /** | |
1937 | * scsi_device_resume - Restart user issued commands to a quiesced device. | |
1938 | * @sdev: scsi device to resume. | |
1939 | * | |
1940 | * Moves the device from quiesced back to running and restarts the | |
1941 | * queues. | |
1942 | * | |
1943 | * Must be called with user context, may sleep. | |
1944 | **/ | |
1945 | void | |
1946 | scsi_device_resume(struct scsi_device *sdev) | |
1947 | { | |
1948 | if (scsi_device_set_state(sdev, SDEV_RUNNING)) |
1949 | return; | |
1950 | scsi_run_queue(sdev->request_queue); | |
1951 | } | |
1952 | EXPORT_SYMBOL(scsi_device_resume); | |
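/*
 * Illustrative sketch, not part of this file: quiesce/resume bracket an
 * operation that must see the device idle except for its own requests,
 * e.g. a hypothetical firmware download ("mydrv_download_fw").
 */
static int mydrv_do_maintenance(struct scsi_device *sdev)
{
	int err;

	err = scsi_device_quiesce(sdev);
	if (err)
		return err;

	err = mydrv_download_fw(sdev);	/* issues its own special requests */

	scsi_device_resume(sdev);
	return err;
}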
1953 | ||
1954 | static void | |
1955 | device_quiesce_fn(struct scsi_device *sdev, void *data) | |
1956 | { | |
1957 | scsi_device_quiesce(sdev); | |
1958 | } | |
1959 | ||
1960 | void | |
1961 | scsi_target_quiesce(struct scsi_target *starget) | |
1962 | { | |
1963 | starget_for_each_device(starget, NULL, device_quiesce_fn); | |
1964 | } | |
1965 | EXPORT_SYMBOL(scsi_target_quiesce); | |
1966 | ||
1967 | static void | |
1968 | device_resume_fn(struct scsi_device *sdev, void *data) | |
1969 | { | |
1970 | scsi_device_resume(sdev); | |
1971 | } | |
1972 | ||
1973 | void | |
1974 | scsi_target_resume(struct scsi_target *starget) | |
1975 | { | |
1976 | starget_for_each_device(starget, NULL, device_resume_fn); | |
1977 | } | |
1978 | EXPORT_SYMBOL(scsi_target_resume); | |
1979 | ||
1980 | /** | |
1981 | * scsi_internal_device_block - internal function to put a device | |
1982 | * temporarily into the SDEV_BLOCK state | |
1983 | * @sdev: device to block | |
1984 | * | |
1985 | * Used by SCSI LLDs to temporarily stop all |
1986 | * SCSI commands on the specified device. Called from interrupt |
1987 | * or normal process context. | |
1988 | * | |
1989 | * Returns zero if successful or error if not | |
1990 | * | |
1991 | * Notes: | |
1992 | * This routine transitions the device to the SDEV_BLOCK state | |
1993 | * (which must be a legal transition). When the device is in this | |
1994 | * state, all commands are deferred until the SCSI LLD reenables |
1995 | * the device with scsi_internal_device_unblock() or device_block_tmo fires. |
1996 | * This routine assumes the host_lock is held on entry. | |
1997 | **/ | |
1998 | int | |
1999 | scsi_internal_device_block(struct scsi_device *sdev) | |
2000 | { | |
2001 | request_queue_t *q = sdev->request_queue; | |
2002 | unsigned long flags; | |
2003 | int err = 0; | |
2004 | ||
2005 | err = scsi_device_set_state(sdev, SDEV_BLOCK); | |
2006 | if (err) | |
2007 | return err; | |
2008 | ||
2009 | /* | |
2010 | * The device has transitioned to SDEV_BLOCK. Stop the | |
2011 | * block layer from calling the midlayer with this device's | |
2012 | * request queue. | |
2013 | */ | |
2014 | spin_lock_irqsave(q->queue_lock, flags); | |
2015 | blk_stop_queue(q); | |
2016 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2017 | ||
2018 | return 0; | |
2019 | } | |
2020 | EXPORT_SYMBOL_GPL(scsi_internal_device_block); | |
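/*
 * Illustrative sketch, not part of this file: per the note above the
 * caller must hold host_lock, so an LLD reacting to a dead port from
 * its interrupt handler might block the device like this.
 */
static void mydrv_port_down(struct scsi_device *sdev)
{
	unsigned long flags;

	spin_lock_irqsave(sdev->host->host_lock, flags);
	scsi_internal_device_block(sdev);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);
}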
2021 | ||
2022 | /** | |
2023 | * scsi_internal_device_unblock - resume a device after a block request | |
2024 | * @sdev: device to resume | |
2025 | * | |
2026 | * Called by SCSI LLDs or the midlayer to restart the device queue |
2027 | * for the previously suspended scsi device. Called from interrupt or | |
2028 | * normal process context. | |
2029 | * | |
2030 | * Returns zero if successful or error if not. | |
2031 | * | |
2032 | * Notes: | |
2033 | * This routine transitions the device to the SDEV_RUNNING state | |
2034 | * (which must be a legal transition) allowing the midlayer to | |
2035 | * goose the queue for this device. This routine assumes the | |
2036 | * host_lock is held upon entry. | |
2037 | **/ | |
2038 | int | |
2039 | scsi_internal_device_unblock(struct scsi_device *sdev) | |
2040 | { | |
2041 | request_queue_t *q = sdev->request_queue; | |
2042 | int err; | |
2043 | unsigned long flags; | |
2044 | ||
2045 | /* | |
2046 | * Try to transition the scsi device to SDEV_RUNNING | |
2047 | * and goose the device queue if successful. | |
2048 | */ | |
2049 | err = scsi_device_set_state(sdev, SDEV_RUNNING); | |
2050 | if (err) | |
2051 | return err; | |
2052 | ||
2053 | spin_lock_irqsave(q->queue_lock, flags); | |
2054 | blk_start_queue(q); | |
2055 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2056 | ||
2057 | return 0; | |
2058 | } | |
2059 | EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); | |
2060 | ||
2061 | static void | |
2062 | device_block(struct scsi_device *sdev, void *data) | |
2063 | { | |
2064 | scsi_internal_device_block(sdev); | |
2065 | } | |
2066 | ||
2067 | static int | |
2068 | target_block(struct device *dev, void *data) | |
2069 | { | |
2070 | if (scsi_is_target_device(dev)) | |
2071 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2072 | device_block); | |
2073 | return 0; | |
2074 | } | |
2075 | ||
2076 | void | |
2077 | scsi_target_block(struct device *dev) | |
2078 | { | |
2079 | if (scsi_is_target_device(dev)) | |
2080 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2081 | device_block); | |
2082 | else | |
2083 | device_for_each_child(dev, NULL, target_block); | |
2084 | } | |
2085 | EXPORT_SYMBOL_GPL(scsi_target_block); | |
2086 | ||
2087 | static void | |
2088 | device_unblock(struct scsi_device *sdev, void *data) | |
2089 | { | |
2090 | scsi_internal_device_unblock(sdev); | |
2091 | } | |
2092 | ||
2093 | static int | |
2094 | target_unblock(struct device *dev, void *data) | |
2095 | { | |
2096 | if (scsi_is_target_device(dev)) | |
2097 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2098 | device_unblock); | |
2099 | return 0; | |
2100 | } | |
2101 | ||
2102 | void | |
2103 | scsi_target_unblock(struct device *dev) | |
2104 | { | |
2105 | if (scsi_is_target_device(dev)) | |
2106 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2107 | device_unblock); | |
2108 | else | |
2109 | device_for_each_child(dev, NULL, target_unblock); | |
2110 | } | |
2111 | EXPORT_SYMBOL_GPL(scsi_target_unblock); |
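/*
 * Illustrative sketch, not part of this file: transport classes use the
 * target-wide calls around link events, blocking every device behind a
 * target while its link is down and unblocking it on recovery.
 */
static void mydrv_link_event(struct scsi_target *starget, int link_up)
{
	if (link_up)
		scsi_target_unblock(&starget->dev);
	else
		scsi_target_block(&starget->dev);
}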