/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE		32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->flags &= ~REQ_DONTPREP;
	req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;

	scsi_put_command(cmd);
}

/*
 * Function:	scsi_queue_insert()
 *
 * Purpose:	Insert a command in the midlevel queue.
 *
 * Arguments:	cmd	- command that we are adding to queue.
 *		reason	- why we are inserting command to queue.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	We do this for one of two cases.  Either the host is busy
 *		and it cannot accept any more commands for the time being,
 *		or the device returned QUEUE_FULL and can accept no more
 *		commands.
 * Notes:	This could be called either from an interrupt context or a
 *		normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
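
/*
 * Illustrative sketch (not part of the original file): scsi_queue_insert()
 * is typically reached after a low-level driver's queuecommand() refuses a
 * command, roughly:
 *
 *	static int my_queuecommand(struct scsi_cmnd *cmd,
 *				   void (*done)(struct scsi_cmnd *))
 *	{
 *		if (my_adapter_is_full(cmd->device->host))
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		...
 *	}
 *
 * scsi_dispatch_cmd() then passes that reason to scsi_queue_insert(),
 * which requeues the command as above.  my_queuecommand() and
 * my_adapter_is_full() are hypothetical names.
 */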

/*
 * Function:	scsi_do_req
 *
 * Purpose:	Queue a SCSI request
 *
 * Arguments:	sreq	- command descriptor.
 *		cmnd	- actual SCSI command to be performed.
 *		buffer	- data buffer.
 *		bufflen	- size of data buffer.
 *		done	- completion function to be run.
 *		timeout	- how long to let it run before timeout.
 *		retries	- number of retries we allow.
 *
 * Lock status:	No locks held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	This function is only used for queueing requests for things
 *		like ioctls and character device requests - this is because
 *		we essentially just inject a request into the queue for the
 *		device.
 *
 *		In order to support the scsi_device_quiesce function, we
 *		now inject requests on the *head* of the device queue
 *		rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);

	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called by us or by the
	 * scsi_hosts[host].queuecommand() function; scsi_done in turn calls
	 * the completion function for the high level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/*
	 * head injection *required* here otherwise quiesce won't work
	 *
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
			   1, sreq);
}
EXPORT_SYMBOL(scsi_do_req);
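
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pairs scsi_do_req() with scsi_allocate_request(), roughly:
 *
 *	struct scsi_request *sreq;
 *	unsigned char scsi_cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *
 *	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *	if (sreq) {
 *		sreq->sr_data_direction = DMA_NONE;
 *		scsi_do_req(sreq, scsi_cmd, NULL, 0, my_done, 30 * HZ, 3);
 *	}
 *
 * where my_done() is the caller's scsi_cmnd completion routine; the
 * timeout and retry values are made up for the example.
 */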

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
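
/*
 * Illustrative sketch (not part of the original file): issuing a
 * synchronous START STOP UNIT with scsi_execute(), roughly:
 *
 *	unsigned char scsi_cmd[6] = { START_STOP, 0, 0, 0, 1, 0 };
 *	int res;
 *
 *	res = scsi_execute(sdev, scsi_cmd, DMA_NONE, NULL, 0, NULL,
 *			   30 * HZ, 3, 0);
 *	if (res)
 *		handle_error(host_byte(res), driver_byte(res));
 *
 * handle_error() and the timeout/retry values are made up for the
 * example.
 */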

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
		memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
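
/*
 * Illustrative sketch (not part of the original file): a data-in command
 * with automatic sense decoding via scsi_execute_req(), roughly:
 *
 *	unsigned char inq_cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
 *	unsigned char data[36];
 *	struct scsi_sense_hdr sshdr;
 *	int res;
 *
 *	res = scsi_execute_req(sdev, inq_cmd, DMA_FROM_DEVICE, data,
 *			       sizeof(data), &sshdr, 30 * HZ, 3);
 *	if (res && scsi_sense_valid(&sshdr))
 *		examine(sshdr.sense_key, sshdr.asc, sshdr.ascq);
 *
 * examine() and the timeout/retry values are made up for the example;
 * scsi_test_unit_ready() below is a real in-tree user of this helper.
 */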

struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void scsi_end_async(struct request *req)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kfree(sioc);
	__blk_put_request(req->q, req);
}

static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	blk_queue_bounce(q, &bio);

	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!q->back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->hard_nr_sectors += bio_sectors(bio);
		rq->nr_sectors = rq->hard_nr_sectors;
	}

	return 0;
}

static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	bio_put(bio);
	return 0;
}

/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sg:		scatterlist
 * @nsegs:	number of elements
 * @bufflen:	len of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer. We do not trust the scatterlist
 * sent to us, as some ULDs use that struct to only organize the pages.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = 0, len, bytes, off;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for (i = 0; i < nsegs; i++) {
		page = sgl[i].page;
		off = sgl[i].offset;
		len = sgl[i].length;
		data_len += len;

		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, bio->bi_size, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = data_len;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, bio->bi_size, 0);
	}

	return err;
}

/**
 * scsi_execute_async - insert request
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen:	len of buffer
 * @use_sg:	if buffer is a scatterlist this is the number of elements
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @privdata:	data passed back to the done callback
 * @done:	completion callback, invoked as done(privdata, sense, result, resid)
 * @gfp:	memory allocation flags
 **/
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int data_direction, void *buffer, unsigned bufflen,
		       int use_sg, int timeout, int retries, void *privdata,
		       void (*done)(void *, char *, int, int), gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kzalloc(sizeof(*sioc), gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;

	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kfree(sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);
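
/*
 * Illustrative sketch (not part of the original file): a fire-and-forget
 * command using scsi_execute_async(), roughly:
 *
 *	static void my_done(void *data, char *sense, int result, int resid)
 *	{
 *		if (result)
 *			... decode sense with scsi_normalize_sense() ...
 *	}
 *
 *	err = scsi_execute_async(sdev, scsi_cmd, DMA_FROM_DEVICE, buf,
 *				 buflen, 0, 30 * HZ, 3, my_ctx, my_done,
 *				 GFP_KERNEL);
 *
 * my_done(), my_ctx, buf/buflen and the timeout/retry values are made up
 * for the example; the callback runs from the block layer when the
 * request completes.
 */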

/*
 * Function:	scsi_init_cmd_errh()
 *
 * Purpose:	Initialize cmd fields related to error handling.
 *
 * Arguments:	cmd	- command that is ready to be queued.
 *
 * Returns:	Nothing
 *
 * Notes:	This function has the job of initializing a number of
 *		fields related to error handling.  Typically this will
 *		be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;

	return 1;
}

/*
 * Function:	scsi_setup_cmd_retry()
 *
 * Purpose:	Restore the command state for a retry
 *
 * Arguments:	cmd	- command to be restored
 *
 * Returns:	Nothing
 *
 * Notes:	Immediately prior to retrying a command, we need
 *		to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:	scsi_end_request()
 *
 * Purpose:	Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:	cmd	 - command that is complete.
 *		uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *		bytes	 - number of bytes of completed I/O
 *		requeue	 - indicates whether we should requeue leftovers.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	cmd if requeue required, NULL otherwise.
 *
 * Notes:	This is called for block device requests in order to
 *		mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}

/*
 * Function:	scsi_release_buffers()
 *
 * Purpose:	Completion processing for block device I/O requests.
 *
 * Arguments:	cmd	- command that we are bailing.
 *
 * Lock status:	Assumed that no lock is held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	else if (cmd->request_buffer != req->buffer)
		kfree(cmd->request_buffer);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:	scsi_io_completion()
 *
 * Purpose:	Completion processing for block device I/O requests.
 *
 * Arguments:	cmd	- command that is finished.
 *
 * Lock status:	Assumed that no lock is held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	This function is matched in terms of capabilities to
 *		the function that created the scatter-gather list.
 *		In other words, if there are no bounce buffers
 *		(the normal case for most drivers), we don't need
 *		the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
			unsigned int block_bytes)
{
	int result = cmd->result;
	int this_count = cmd->bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
		return;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
	else if (cmd->buffer != req->buffer) {
		if (rq_data_dir(req) == READ) {
			unsigned long flags;
			char *to = bio_kmap_irq(req->bio, &flags);
			memcpy(to, cmd->buffer, cmd->bufflen);
			bio_kunmap_irq(to, &flags);
		}
		kfree(cmd->buffer);
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_bytes >= 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
					      req->nr_sectors, good_bytes));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

		if (clear_errors)
			req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_bytes != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */

		/*
		 * If the command completed without error, then either
		 * finish off the rest of the command, or start a new one.
		 */
		if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
			return;
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* detected disc change.  set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			} else {
				/*
				 * Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/*
			 * If we had an ILLEGAL REQUEST returned, then we may
			 * have performed an unsupported command.  The only
			 * thing this should be would be a ten byte read where
			 * only a six byte read was supported.  Also, on a
			 * system where READ CAPACITY failed, we may have read
			 * past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			     sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/*
			 * If the device is in the process of becoming ready,
			 * retry.
			 */
			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
				scsi_requeue_command(q, cmd);
				return;
			}
			if (!(req->flags & REQ_QUIET))
				scmd_printk(KERN_INFO, cmd,
					    "Device not ready.\n");
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Volume overflow, CDB: ");
				__scsi_print_command(cmd->data_cmnd);
				scsi_print_sense("", cmd);
			}
			scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
		}
	} /* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->flags & REQ_QUIET)) {
			scmd_printk(KERN_INFO, cmd,
				    "SCSI error: return code = 0x%x\n", result);

			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
		/*
		 * Mark a single buffer as not uptodate.  Queue the remainder.
		 * We sometimes get this cruft in the event that a medium error
		 * isn't properly reported.
		 */
		block_bytes = req->hard_cur_sectors << 9;
		if (!block_bytes)
			block_bytes = req->data_len;
		scsi_end_request(cmd, 0, block_bytes, 1);
	}
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:	scsi_init_io()
 *
 * Purpose:	SCSI I/O initialize function.
 *
 * Arguments:	cmd	- Command descriptor we wish to initialize
 *
 * Returns:	0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt)) {
		scsi_unprep_request(req);
		return BLKPREP_DEFER;
	}

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;

		if (drv->prepare_flush)
			return drv->prepare_flush(q, rq);
	}

	return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct request *flush_rq = rq->end_io_data;
	struct scsi_driver *drv;

	if (flush_rq->errors) {
		printk("scsi: barrier error, disabling flush support\n");
		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
	}

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
		drv->end_flush(q, rq);
	}
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}

static void scsi_generic_done(struct scsi_cmnd *cmd)
{
	BUG_ON(!blk_pc_request(cmd->request));
	scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to offline device\n");
		goto kill;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state; don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			goto kill;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones) */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL && req->special) {
		struct scsi_request *sreq = req->special;

		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
			scsi_init_cmd_from_req(cmd, sreq);
		} else
			cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		if (unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
			if (specials_only == SDEV_QUIESCE ||
			    specials_only == SDEV_BLOCK)
				goto defer;

			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to device being removed\n");
			goto kill;
		}

		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		goto kill;
	}

	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		struct scsi_driver *drv;
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 *     (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).  This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		switch(ret) {
			/* For BLKPREP_KILL/DEFER the cmd was released */
		case BLKPREP_KILL:
			goto kill;
		case BLKPREP_DEFER:
			goto defer;
		}

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		if (req->rq_disk) {
			drv = *(struct scsi_driver **)req->rq_disk->private_data;
			if (unlikely(!drv->init_command(cmd))) {
				scsi_release_buffers(cmd);
				scsi_put_command(cmd);
				goto kill;
			}
		} else {
			memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
			cmd->cmd_len = req->cmd_len;
			if (rq_data_dir(req) == WRITE)
				cmd->sc_data_direction = DMA_TO_DEVICE;
			else if (req->data_len)
				cmd->sc_data_direction = DMA_FROM_DEVICE;
			else
				cmd->sc_data_direction = DMA_NONE;

			cmd->transfersize = req->data_len;
			cmd->allowed = 3;
			cmd->timeout_per_command = req->timeout;
			cmd->done = scsi_generic_done;
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
 kill:
	req->errors = DID_NO_CONNECT << 16;
	return BLKPREP_KILL;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				sdev_printk(KERN_INFO, sdev,
					    "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, request_queue_t *q)
{
	struct scsi_cmnd *cmd = req->special;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __FUNCTION__);
		BUG();
	}

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);
	__scsi_done(cmd);
}

/*
 * Function:	scsi_request_fn()
 *
 * Purpose:	Main strategy routine for SCSI.
 *
 * Arguments:	q	- Pointer to actual queue.
 *
 * Returns:	Nothing
 *
 * Lock status:	IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org",
					 __FUNCTION__);
			BUG();
		}
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	/*
	 * ordered tags are superior to flush ordering
	 */
	if (shost->ordered_tag)
		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	else if (shost->ordered_flush) {
		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
		q->prepare_flush_fn = scsi_prepare_flush_fn;
		q->end_flush_fn = scsi_end_flush_fn;
	}

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:	scsi_block_requests()
 *
 * Purpose:	Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:	shost	- Host in question
 *
 * Returns:	Nothing
 *
 * Lock status:	No locks are assumed held.
 *
 * Notes:	There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:	scsi_unblock_requests()
 *
 * Purpose:	Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:	shost	- Host in question
 *
 * Returns:	Nothing
 *
 * Lock status:	No locks are assumed held.
 *
 * Notes:	There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
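
/*
 * Illustrative sketch (not part of the original file): a low-level driver
 * typically brackets a period where its hardware cannot take commands,
 * roughly:
 *
 *	scsi_block_requests(shost);
 *	my_reset_adapter(shost);	(no new commands are queued here)
 *	scsi_unblock_requests(shost);	(queues are re-run here)
 *
 * my_reset_adapter() is a hypothetical driver routine.
 */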

int __init scsi_init_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
				mempool_alloc_slab, mempool_free_slab,
				sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}
1793/**
ea73a9f2 1794 * scsi_mode_sense - issue a mode sense, falling back from 10 to
1da177e4 1795 * six bytes if necessary.
1cf72699 1796 * @sdev: SCSI device to be queried
1da177e4
LT
1797 * @dbd: set if mode sense will allow block descriptors to be returned
1798 * @modepage: mode page being requested
1799 * @buffer: request buffer (may not be smaller than eight bytes)
1800 * @len: length of request buffer.
1801 * @timeout: command timeout
1802 * @retries: number of retries before failing
1803 * @data: returns a structure abstracting the mode header data
1cf72699
JB
1804 * @sense: place to put sense data (or NULL if no sense to be collected).
1805 * must be SCSI_SENSE_BUFFERSIZE big.
1da177e4
LT
1806 *
1807 * Returns zero if unsuccessful, or the header offset (either 4
1808 * or 8 depending on whether a six or ten byte command was
1809 * issued) if successful.
1810 **/
1811int
1cf72699 1812scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1da177e4 1813 unsigned char *buffer, int len, int timeout, int retries,
ea73a9f2 1814 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
1da177e4
LT
1815 unsigned char cmd[12];
1816 int use_10_for_ms;
1817 int header_length;
1cf72699 1818 int result;
ea73a9f2 1819 struct scsi_sense_hdr my_sshdr;
1da177e4
LT
1820
1821 memset(data, 0, sizeof(*data));
1822 memset(&cmd[0], 0, 12);
1823 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
1824 cmd[2] = modepage;
1825
ea73a9f2
JB
1826 /* caller might not be interested in sense, but we need it */
1827 if (!sshdr)
1828 sshdr = &my_sshdr;
1829
1da177e4 1830 retry:
1cf72699 1831 use_10_for_ms = sdev->use_10_for_ms;
1da177e4
LT
1832
1833 if (use_10_for_ms) {
1834 if (len < 8)
1835 len = 8;
1836
1837 cmd[0] = MODE_SENSE_10;
1838 cmd[8] = len;
1839 header_length = 8;
1840 } else {
1841 if (len < 4)
1842 len = 4;
1843
1844 cmd[0] = MODE_SENSE;
1845 cmd[4] = len;
1846 header_length = 4;
1847 }
1848
1da177e4
LT
1849 memset(buffer, 0, len);
1850
1cf72699 1851 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
ea73a9f2 1852 sshdr, timeout, retries);
1da177e4
LT
1853
1854 /* This code looks awful: what it's doing is making sure an
1855 * ILLEGAL REQUEST sense return identifies the actual command
1856 * byte as the problem. MODE_SENSE commands can return
1857 * ILLEGAL REQUEST if the code page isn't supported */
1858
1cf72699
JB
1859 if (use_10_for_ms && !scsi_status_is_good(result) &&
1860 (driver_byte(result) & DRIVER_SENSE)) {
ea73a9f2
JB
1861 if (scsi_sense_valid(sshdr)) {
1862 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1863 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1da177e4
LT
1864 /*
1865 * Invalid command operation code
1866 */
1cf72699 1867 sdev->use_10_for_ms = 0;
1da177e4
LT
1868 goto retry;
1869 }
1870 }
1871 }
1872
1cf72699 1873 if(scsi_status_is_good(result)) {
1da177e4
LT
1874 data->header_length = header_length;
1875 if(use_10_for_ms) {
1876 data->length = buffer[0]*256 + buffer[1] + 2;
1877 data->medium_type = buffer[2];
1878 data->device_specific = buffer[3];
1879 data->longlba = buffer[4] & 0x01;
1880 data->block_descriptor_length = buffer[6]*256
1881 + buffer[7];
1882 } else {
1883 data->length = buffer[0] + 1;
1884 data->medium_type = buffer[1];
1885 data->device_specific = buffer[2];
1886 data->block_descriptor_length = buffer[3];
1887 }
1888 }
1889
1cf72699 1890 return result;
1891}
1892EXPORT_SYMBOL(scsi_mode_sense);
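
A hedged usage sketch, modelled on how sd.c reads the caching mode page; the buffer size, timeout and retry count here are illustrative, not prescriptive:

static int example_read_cache_page(struct scsi_device *sdev)
{
	unsigned char buffer[128];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res;

	/* Page 0x08 is the caching mode page. */
	res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
			      30 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* The page itself starts after the mode header and any
	 * block descriptors. */
	return data.header_length + data.block_descriptor_length;
}

Note that the ten-to-six-byte fallback is transparent to the caller: which command size was actually used shows up only in data.header_length.
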
1893
1894int
1895scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1896{
1897 char cmd[] = {
1898 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1899 };
ea73a9f2 1900 struct scsi_sense_hdr sshdr;
1901 int result;
1902
ea73a9f2 1903 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1cf72699 1904 timeout, retries);
1da177e4 1905
1cf72699 1906 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1da177e4 1907
ea73a9f2 1908 if ((scsi_sense_valid(&sshdr)) &&
1909 ((sshdr.sense_key == UNIT_ATTENTION) ||
1910 (sshdr.sense_key == NOT_READY))) {
1911 sdev->changed = 1;
1cf72699 1912 result = 0;
1913 }
1914 }
1915 return result;
1916}
1917EXPORT_SYMBOL(scsi_test_unit_ready);
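
As a usage sketch, in the spirit of the media polling done by removable-media drivers (timeout and retry values are illustrative):

static int example_media_present(struct scsi_device *sdev)
{
	int result = scsi_test_unit_ready(sdev, 30 * HZ, 3);

	/* Zero covers both a genuinely ready unit and the removable
	 * case above, where UNIT ATTENTION/NOT READY is absorbed and
	 * recorded in sdev->changed instead. */
	return result == 0;
}
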
1918
1919/**
1920 * scsi_device_set_state - Take the given device through the device
1921 * state model.
1922 * @sdev: scsi device to change the state of.
1923 * @state: state to change to.
1924 *
1925 * Returns zero if successful, or -EINVAL if the requested
1926 * transition is illegal.
1927 **/
1928int
1929scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1930{
1931 enum scsi_device_state oldstate = sdev->sdev_state;
1932
1933 if (state == oldstate)
1934 return 0;
1935
1936 switch (state) {
1937 case SDEV_CREATED:
1938 /* There are no legal states that come back to
1939 * created. This is the manually initialised start
1940 * state */
1941 goto illegal;
1942
1943 case SDEV_RUNNING:
1944 switch (oldstate) {
1945 case SDEV_CREATED:
1946 case SDEV_OFFLINE:
1947 case SDEV_QUIESCE:
1948 case SDEV_BLOCK:
1949 break;
1950 default:
1951 goto illegal;
1952 }
1953 break;
1954
1955 case SDEV_QUIESCE:
1956 switch (oldstate) {
1957 case SDEV_RUNNING:
1958 case SDEV_OFFLINE:
1959 break;
1960 default:
1961 goto illegal;
1962 }
1963 break;
1964
1965 case SDEV_OFFLINE:
1966 switch (oldstate) {
1967 case SDEV_CREATED:
1968 case SDEV_RUNNING:
1969 case SDEV_QUIESCE:
1970 case SDEV_BLOCK:
1971 break;
1972 default:
1973 goto illegal;
1974 }
1975 break;
1976
1977 case SDEV_BLOCK:
1978 switch (oldstate) {
1979 case SDEV_CREATED:
1980 case SDEV_RUNNING:
1981 break;
1982 default:
1983 goto illegal;
1984 }
1985 break;
1986
1987 case SDEV_CANCEL:
1988 switch (oldstate) {
1989 case SDEV_CREATED:
1990 case SDEV_RUNNING:
1991 case SDEV_OFFLINE:
1992 case SDEV_BLOCK:
1993 break;
1994 default:
1995 goto illegal;
1996 }
1997 break;
1998
1999 case SDEV_DEL:
2000 switch (oldstate) {
2001 case SDEV_CANCEL:
2002 break;
2003 default:
2004 goto illegal;
2005 }
2006 break;
2007
2008 }
2009 sdev->sdev_state = state;
2010 return 0;
2011
2012 illegal:
2013 SCSI_LOG_ERROR_RECOVERY(1,
2014 sdev_printk(KERN_ERR, sdev,
2015 "Illegal state transition %s->%s\n",
2016 scsi_device_state_name(oldstate),
2017 scsi_device_state_name(state))
2018 );
2019 return -EINVAL;
2020}
2021EXPORT_SYMBOL(scsi_device_set_state);
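
To make the transition tables concrete, a sketch of one legal and one illegal call (per the switches above, CREATED->RUNNING is permitted while RUNNING->DEL is not):

static void example_state_transitions(struct scsi_device *sdev)
{
	int err;

	/* Legal from CREATED, OFFLINE, QUIESCE or BLOCK (and a
	 * no-op returning zero if already RUNNING). */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);

	/* Illegal from RUNNING: SDEV_DEL is reachable only from
	 * SDEV_CANCEL, so this returns -EINVAL and logs the
	 * attempted transition. */
	err = scsi_device_set_state(sdev, SDEV_DEL);
}
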
2022
2023/**
2024 * scsi_device_quiesce - Block user issued commands.
2025 * @sdev: scsi device to quiesce.
2026 *
2027 * This works by trying to transition to the SDEV_QUIESCE state
2028 * (which must be a legal transition). When the device is in this
2029 * state, only special requests will be accepted; all others will
2030 * be deferred. Since special requests may also be requeued requests,
2031 * a successful return doesn't guarantee the device will be
2032 * totally quiescent.
2033 *
2034 * Must be called from user context; may sleep.
2035 *
2036 * Returns zero if successful, or an error if not.
2037 **/
2038int
2039scsi_device_quiesce(struct scsi_device *sdev)
2040{
2041 int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2042 if (err)
2043 return err;
2044
2045 scsi_run_queue(sdev->request_queue);
2046 while (sdev->device_busy) {
2047 msleep_interruptible(200);
2048 scsi_run_queue(sdev->request_queue);
2049 }
2050 return 0;
2051}
2052EXPORT_SYMBOL(scsi_device_quiesce);
2053
2054/**
2055 * scsi_device_resume - Restart user issued commands to a quiesced device.
2056 * @sdev: scsi device to resume.
2057 *
2058 * Moves the device from quiesced back to running and restarts the
2059 * queues.
2060 *
2061 * Must be called from user context; may sleep.
2062 **/
2063void
2064scsi_device_resume(struct scsi_device *sdev)
2065{
2066 if (scsi_device_set_state(sdev, SDEV_RUNNING))
2067 return;
2068 scsi_run_queue(sdev->request_queue);
2069}
2070EXPORT_SYMBOL(scsi_device_resume);
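
A minimal sketch of the intended pairing, the same shape SPI domain validation uses (the work in the middle is a placeholder):

static int example_with_quiesced_device(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);	/* may sleep */

	if (err)
		return err;

	/* ... issue special/internal requests; user I/O is deferred ... */

	scsi_device_resume(sdev);
	return 0;
}
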
2071
2072static void
2073device_quiesce_fn(struct scsi_device *sdev, void *data)
2074{
2075 scsi_device_quiesce(sdev);
2076}
2077
2078void
2079scsi_target_quiesce(struct scsi_target *starget)
2080{
2081 starget_for_each_device(starget, NULL, device_quiesce_fn);
2082}
2083EXPORT_SYMBOL(scsi_target_quiesce);
2084
2085static void
2086device_resume_fn(struct scsi_device *sdev, void *data)
2087{
2088 scsi_device_resume(sdev);
2089}
2090
2091void
2092scsi_target_resume(struct scsi_target *starget)
2093{
2094 starget_for_each_device(starget, NULL, device_resume_fn);
2095}
2096EXPORT_SYMBOL(scsi_target_resume);
2097
2098/**
2099 * scsi_internal_device_block - internal function to put a device
2100 * temporarily into the SDEV_BLOCK state
2101 * @sdev: device to block
2102 *
2103 * Block request made by SCSI LLDs to temporarily stop all
2104 * SCSI commands to the specified device. May be called from
2105 * interrupt or normal process context.
2106 *
2107 * Returns zero if successful or error if not
2108 *
2109 * Notes:
2110 * This routine transitions the device to the SDEV_BLOCK state
2111 * (which must be a legal transition). When the device is in this
2112 * state, all commands are deferred until the SCSI LLD reenables
2113 * the device with scsi_internal_device_unblock() or device_block_tmo fires.
2114 * This routine assumes the host_lock is held on entry.
2115 **/
2116int
2117scsi_internal_device_block(struct scsi_device *sdev)
2118{
2119 request_queue_t *q = sdev->request_queue;
2120 unsigned long flags;
2121 int err = 0;
2122
2123 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2124 if (err)
2125 return err;
2126
2127 /*
2128 * The device has transitioned to SDEV_BLOCK. Stop the
2129 * block layer from calling the midlayer with this device's
2130 * request queue.
2131 */
2132 spin_lock_irqsave(q->queue_lock, flags);
2133 blk_stop_queue(q);
2134 spin_unlock_irqrestore(q->queue_lock, flags);
2135
2136 return 0;
2137}
2138EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2139
2140/**
2141 * scsi_internal_device_unblock - resume a device after a block request
2142 * @sdev: device to resume
2143 *
2144 * Called by SCSI LLDs or the midlayer to restart the queue of
2145 * a previously suspended SCSI device. May be called from interrupt
2146 * or normal process context.
2147 *
2148 * Returns zero if successful or error if not.
2149 *
2150 * Notes:
2151 * This routine transitions the device to the SDEV_RUNNING state
2152 * (which must be a legal transition) allowing the midlayer to
2153 * goose the queue for this device. This routine assumes the
2154 * host_lock is held upon entry.
2155 **/
2156int
2157scsi_internal_device_unblock(struct scsi_device *sdev)
2158{
2159 request_queue_t *q = sdev->request_queue;
2160 int err;
2161 unsigned long flags;
2162
2163 /*
2164 * Try to transition the scsi device to SDEV_RUNNING
2165 * and goose the device queue if successful.
2166 */
2167 err = scsi_device_set_state(sdev, SDEV_RUNNING);
2168 if (err)
2169 return err;
2170
2171 spin_lock_irqsave(q->queue_lock, flags);
2172 blk_start_queue(q);
2173 spin_unlock_irqrestore(q->queue_lock, flags);
2174
2175 return 0;
2176}
2177EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
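
A hedged sketch of how an LLD might bracket internal recovery (for example a firmware reset) with these two calls; per the notes above, both assume host_lock is held on entry:

static void example_block_for_recovery(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_internal_device_block(sdev);	/* defer all commands */
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* ... LLD-private recovery work runs here ... */

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_internal_device_unblock(sdev);	/* restart the queue */
	spin_unlock_irqrestore(shost->host_lock, flags);
}
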
2178
2179static void
2180device_block(struct scsi_device *sdev, void *data)
2181{
2182 scsi_internal_device_block(sdev);
2183}
2184
2185static int
2186target_block(struct device *dev, void *data)
2187{
2188 if (scsi_is_target_device(dev))
2189 starget_for_each_device(to_scsi_target(dev), NULL,
2190 device_block);
2191 return 0;
2192}
2193
2194void
2195scsi_target_block(struct device *dev)
2196{
2197 if (scsi_is_target_device(dev))
2198 starget_for_each_device(to_scsi_target(dev), NULL,
2199 device_block);
2200 else
2201 device_for_each_child(dev, NULL, target_block);
2202}
2203EXPORT_SYMBOL_GPL(scsi_target_block);
2204
2205static void
2206device_unblock(struct scsi_device *sdev, void *data)
2207{
2208 scsi_internal_device_unblock(sdev);
2209}
2210
2211static int
2212target_unblock(struct device *dev, void *data)
2213{
2214 if (scsi_is_target_device(dev))
2215 starget_for_each_device(to_scsi_target(dev), NULL,
2216 device_unblock);
2217 return 0;
2218}
2219
2220void
2221scsi_target_unblock(struct device *dev)
2222{
2223 if (scsi_is_target_device(dev))
2224 starget_for_each_device(to_scsi_target(dev), NULL,
2225 device_unblock);
2226 else
2227 device_for_each_child(dev, NULL, target_unblock);
2228}
2229EXPORT_SYMBOL_GPL(scsi_target_unblock);
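
Finally, a sketch of the transport-class pattern these exports serve: block a whole target while its port is gone and unblock it on return, as the FC transport does (the function names here are illustrative):

static void example_port_lost(struct scsi_target *starget)
{
	/* Takes the generic device, so a parent port works too. */
	scsi_target_block(&starget->dev);
}

static void example_port_returned(struct scsi_target *starget)
{
	scsi_target_unblock(&starget->dev);
}
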