/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	blk_unprep_request(req);
	req->special = NULL;

	scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kblockd_schedule_work(q, &device->requeue_work);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}

/**
 * scsi_execute - insert request and wait for the result
 * @sdev: scsi device
 * @cmd: scsi command
 * @data_direction: data direction
 * @buffer: data buffer
 * @bufflen: len of buffer
 * @sense: optional sense buffer
 * @timeout: request timeout in seconds
 * @retries: number of times to retry request
 * @flags: or into request flags
 * @resid: optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
	if (!req)
		return ret;

	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
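
/*
 * Editorial sketch (not upstream code): one plausible way a caller might
 * use scsi_execute_req() above -- issue a TEST UNIT READY with no data
 * phase and decode any sense data returned.  The wrapper name below is
 * made up for illustration; TEST_UNIT_READY, DMA_NONE and
 * scsi_sense_valid() are the standard kernel definitions.
 */
#if 0	/* illustrative only, never compiled */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	/* DMA_NONE: no data buffer; sense is decoded into sshdr for us. */
	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				  10 * HZ, 3, NULL);
	if (result && scsi_sense_valid(&sshdr))
		sdev_printk(KERN_INFO, sdev, "TUR failed: %x/%x/%x\n",
			    sshdr.sense_key, sshdr.asc, sshdr.ascq);
	return result;
}
#endif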

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	LIST_HEAD(starved_list);
	unsigned long flags;

	shost = sdev->host;
	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);
		spin_lock(sdev->request_queue->queue_lock);
		__blk_run_queue(sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);
		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * We need to hold a reference on the device to avoid the queue being
	 * killed after the unlock and before scsi_run_queue is invoked which
	 * may happen because scsi_unprep_request() puts the command which
	 * releases its reference on the device.
	 */
	get_device(&sdev->sdev_gendev);

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
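
/*
 * Editorial note (not upstream): a worked example of the index math --
 * for nents = 24, get_count_order(24) = 5 (the next power of two is 32),
 * so index = 5 - 3 = 2, which selects the "sgpool-32" entry of
 * scsi_sg_pools[] above, i.e. the smallest pool whose size (8 << index)
 * can hold nents entries.
 */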

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch(host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			scsi_release_buffers(cmd);
			blk_end_request_all(req, 0);

			scsi_next_command(cmd);
			return;
		}
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user.
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
				   (cmd->cmnd[0] == UNMAP ||
				    cmd->cmnd[0] == WRITE_SAME_16 ||
				    cmd->cmnd[0] == WRITE_SAME)) {
				description = "Discard failure";
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (blk_end_request_err(req, error))
			scsi_requeue_command(q, cmd);
		else
			scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct request *rq = cmd->request;

	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		rq->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	cmd->request->special = NULL;
	scsi_put_command(cmd);
	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (sdev->device_busy == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);
1da177e4
LT
1266
1267/*
1268 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1269 * return 0.
1270 *
1271 * Called with the queue_lock held.
1272 */
1273static inline int scsi_dev_queue_ready(struct request_queue *q,
1274 struct scsi_device *sdev)
1275{
1da177e4
LT
1276 if (sdev->device_busy == 0 && sdev->device_blocked) {
1277 /*
1278 * unblock after device_blocked iterates to zero
1279 */
1280 if (--sdev->device_blocked == 0) {
1281 SCSI_LOG_MLQUEUE(3,
9ccfc756
JB
1282 sdev_printk(KERN_INFO, sdev,
1283 "unblocking device at zero depth\n"));
1da177e4 1284 } else {
a488e749 1285 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1da177e4
LT
1286 return 0;
1287 }
1288 }
9d112517 1289 if (scsi_device_is_busy(sdev))
1da177e4
LT
1290 return 0;
1291
1292 return 1;
1293}
1294
f0c0a376
MC
1295
1296/*
1297 * scsi_target_queue_ready: checks if there we can send commands to target
1298 * @sdev: scsi device on starget to check.
1299 *
1300 * Called with the host lock held.
1301 */
1302static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1303 struct scsi_device *sdev)
1304{
1305 struct scsi_target *starget = scsi_target(sdev);
1306
1307 if (starget->single_lun) {
1308 if (starget->starget_sdev_user &&
1309 starget->starget_sdev_user != sdev)
1310 return 0;
1311 starget->starget_sdev_user = sdev;
1312 }
1313
1314 if (starget->target_busy == 0 && starget->target_blocked) {
1315 /*
1316 * unblock after target_blocked iterates to zero
1317 */
1318 if (--starget->target_blocked == 0) {
1319 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1320 "unblocking target at zero depth\n"));
b4efdd58 1321 } else
f0c0a376 1322 return 0;
f0c0a376
MC
1323 }
1324
1325 if (scsi_target_is_busy(starget)) {
466c08c7 1326 list_move_tail(&sdev->starved_entry, &shost->starved_list);
fd01a663 1327 return 0;
f0c0a376
MC
1328 }
1329
f0c0a376
MC
1330 return 1;
1331}
1332
1da177e4
LT
1333/*
1334 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1335 * return 0. We must end up running the queue again whenever 0 is
1336 * returned, else IO can hang.
1337 *
1338 * Called with host_lock held.
1339 */
1340static inline int scsi_host_queue_ready(struct request_queue *q,
1341 struct Scsi_Host *shost,
1342 struct scsi_device *sdev)
1343{
939647ee 1344 if (scsi_host_in_recovery(shost))
1da177e4
LT
1345 return 0;
1346 if (shost->host_busy == 0 && shost->host_blocked) {
1347 /*
1348 * unblock after host_blocked iterates to zero
1349 */
1350 if (--shost->host_blocked == 0) {
1351 SCSI_LOG_MLQUEUE(3,
1352 printk("scsi%d unblocking host at zero depth\n",
1353 shost->host_no));
1354 } else {
1da177e4
LT
1355 return 0;
1356 }
1357 }
9d112517 1358 if (scsi_host_is_busy(shost)) {
1da177e4
LT
1359 if (list_empty(&sdev->starved_entry))
1360 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1361 return 0;
1362 }
1363
1364 /* We're OK to process the command, so we can't be starved */
1365 if (!list_empty(&sdev->starved_entry))
1366 list_del_init(&sdev->starved_entry);
1367
1368 return 1;
1369}
1370
6c5121b7
KU
1371/*
1372 * Busy state exporting function for request stacking drivers.
1373 *
1374 * For efficiency, no lock is taken to check the busy state of
1375 * shost/starget/sdev, since the returned value is not guaranteed and
1376 * may be changed after request stacking drivers call the function,
1377 * regardless of taking lock or not.
1378 *
67bd9413
BVA
1379 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
1380 * needs to return 'not busy'. Otherwise, request stacking drivers
1381 * may hold requests forever.
6c5121b7
KU
1382 */
1383static int scsi_lld_busy(struct request_queue *q)
1384{
1385 struct scsi_device *sdev = q->queuedata;
1386 struct Scsi_Host *shost;
6c5121b7 1387
67bd9413 1388 if (blk_queue_dead(q))
6c5121b7
KU
1389 return 0;
1390
1391 shost = sdev->host;
6c5121b7 1392
b7e94a16
JN
1393 /*
1394 * Ignore host/starget busy state.
1395 * Since block layer does not have a concept of fairness across
1396 * multiple queues, congestion of host/starget needs to be handled
1397 * in SCSI layer.
1398 */
1399 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
6c5121b7
KU
1400 return 1;
1401
1402 return 0;
1403}
1404
1da177e4 1405/*
e91442b6 1406 * Kill a request for a dead device
1da177e4 1407 */
165125e1 1408static void scsi_kill_request(struct request *req, struct request_queue *q)
1da177e4 1409{
e91442b6 1410 struct scsi_cmnd *cmd = req->special;
03b14708
JS
1411 struct scsi_device *sdev;
1412 struct scsi_target *starget;
1413 struct Scsi_Host *shost;
1da177e4 1414
9934c8c0 1415 blk_start_request(req);
788ce43a 1416
74571813
HR
1417 scmd_printk(KERN_INFO, cmd, "killing request\n");
1418
03b14708
JS
1419 sdev = cmd->device;
1420 starget = scsi_target(sdev);
1421 shost = sdev->host;
e91442b6
JB
1422 scsi_init_cmd_errh(cmd);
1423 cmd->result = DID_NO_CONNECT << 16;
1424 atomic_inc(&cmd->device->iorequest_cnt);
e36e0c80
TH
1425
1426 /*
1427 * SCSI request completion path will do scsi_device_unbusy(),
1428 * bump busy counts. To bump the counters, we need to dance
1429 * with the locks as normal issue path does.
1430 */
1431 sdev->device_busy++;
1432 spin_unlock(sdev->request_queue->queue_lock);
1433 spin_lock(shost->host_lock);
1434 shost->host_busy++;
f0c0a376 1435 starget->target_busy++;
e36e0c80
TH
1436 spin_unlock(shost->host_lock);
1437 spin_lock(sdev->request_queue->queue_lock);
1438
242f9dcb 1439 blk_complete_request(req);
1da177e4
LT
1440}
1441
1aea6434
JA
1442static void scsi_softirq_done(struct request *rq)
1443{
242f9dcb
JA
1444 struct scsi_cmnd *cmd = rq->special;
1445 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1aea6434
JA
1446 int disposition;
1447
1448 INIT_LIST_HEAD(&cmd->eh_entry);
1449
242f9dcb
JA
1450 atomic_inc(&cmd->device->iodone_cnt);
1451 if (cmd->result)
1452 atomic_inc(&cmd->device->ioerr_cnt);
1453
1aea6434
JA
1454 disposition = scsi_decide_disposition(cmd);
1455 if (disposition != SUCCESS &&
1456 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1457 sdev_printk(KERN_ERR, cmd->device,
1458 "timing out command, waited %lus\n",
1459 wait_for/HZ);
1460 disposition = SUCCESS;
1461 }
1462
1463 scsi_log_completion(cmd, disposition);
1464
1465 switch (disposition) {
1466 case SUCCESS:
1467 scsi_finish_command(cmd);
1468 break;
1469 case NEEDS_RETRY:
596f482a 1470 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1aea6434
JA
1471 break;
1472 case ADD_TO_MLQUEUE:
1473 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1474 break;
1475 default:
1476 if (!scsi_eh_scmd_add(cmd, 0))
1477 scsi_finish_command(cmd);
1478 }
1479}
1480
1da177e4
LT
1481/*
1482 * Function: scsi_request_fn()
1483 *
1484 * Purpose: Main strategy routine for SCSI.
1485 *
1486 * Arguments: q - Pointer to actual queue.
1487 *
1488 * Returns: Nothing
1489 *
1490 * Lock status: IO request lock assumed to be held when called.
1491 */
1492static void scsi_request_fn(struct request_queue *q)
1493{
1494 struct scsi_device *sdev = q->queuedata;
1495 struct Scsi_Host *shost;
1496 struct scsi_cmnd *cmd;
1497 struct request *req;
1498
1da177e4
LT
1499 if(!get_device(&sdev->sdev_gendev))
1500 /* We must be tearing the block queue down already */
1501 return;
1502
1503 /*
1504 * To start with, we keep looping until the queue is empty, or until
1505 * the host is no longer able to accept any more requests.
1506 */
1507 shost = sdev->host;
a488e749 1508 for (;;) {
1da177e4
LT
1509 int rtn;
1510 /*
1511 * get next queueable request. We do this early to make sure
1512 * that the request is fully prepared even if we cannot
1513 * accept it.
1514 */
9934c8c0 1515 req = blk_peek_request(q);
1da177e4
LT
1516 if (!req || !scsi_dev_queue_ready(q, sdev))
1517 break;
1518
1519 if (unlikely(!scsi_device_online(sdev))) {
9ccfc756
JB
1520 sdev_printk(KERN_ERR, sdev,
1521 "rejecting I/O to offline device\n");
e91442b6 1522 scsi_kill_request(req, q);
1da177e4
LT
1523 continue;
1524 }
1525
1526
1527 /*
1528 * Remove the request from the request list.
1529 */
1530 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
9934c8c0 1531 blk_start_request(req);
1da177e4
LT
1532 sdev->device_busy++;
1533
1534 spin_unlock(q->queue_lock);
e91442b6
JB
1535 cmd = req->special;
1536 if (unlikely(cmd == NULL)) {
1537 printk(KERN_CRIT "impossible request in %s.\n"
1538 "please mail a stack trace to "
4aff5e23 1539 "linux-scsi@vger.kernel.org\n",
cadbd4a5 1540 __func__);
4aff5e23 1541 blk_dump_rq_flags(req, "foo");
e91442b6
JB
1542 BUG();
1543 }
1da177e4
LT
1544 spin_lock(shost->host_lock);
1545
ecefe8a9
MC
1546 /*
1547 * We hit this when the driver is using a host wide
1548 * tag map. For device level tag maps the queue_depth check
1549 * in the device ready fn would prevent us from trying
1550 * to allocate a tag. Since the map is a shared host resource
1551 * we add the dev to the starved list so it eventually gets
1552 * a run when a tag is freed.
1553 */
6bd522f6 1554 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
ecefe8a9
MC
1555 if (list_empty(&sdev->starved_entry))
1556 list_add_tail(&sdev->starved_entry,
1557 &shost->starved_list);
1558 goto not_ready;
1559 }
1560
f0c0a376
MC
1561 if (!scsi_target_queue_ready(shost, sdev))
1562 goto not_ready;
1563
1da177e4
LT
1564 if (!scsi_host_queue_ready(q, shost, sdev))
1565 goto not_ready;
f0c0a376
MC
1566
1567 scsi_target(sdev)->target_busy++;
1da177e4
LT
1568 shost->host_busy++;
1569
1570 /*
1571 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1572 * take the lock again.
1573 */
1574 spin_unlock_irq(shost->host_lock);
1575
1da177e4
LT
1576 /*
1577 * Finally, initialize any error handling parameters, and set up
1578 * the timers for timeouts.
1579 */
1580 scsi_init_cmd_errh(cmd);
1581
1582 /*
1583 * Dispatch the command to the low-level driver.
1584 */
1585 rtn = scsi_dispatch_cmd(cmd);
1586 spin_lock_irq(q->queue_lock);
a488e749
JA
1587 if (rtn)
1588 goto out_delay;
1da177e4
LT
1589 }
1590
1591 goto out;
1592
1593 not_ready:
1594 spin_unlock_irq(shost->host_lock);
1595
1596 /*
1597 * lock q, handle tag, requeue req, and decrement device_busy. We
1598 * must return with queue_lock held.
1599 *
1600 * Decrementing device_busy without checking it is OK, as all such
1601 * cases (host limits or settings) should run the queue at some
1602 * later time.
1603 */
1604 spin_lock_irq(q->queue_lock);
1605 blk_requeue_request(q, req);
1606 sdev->device_busy--;
a488e749
JA
1607out_delay:
1608 if (sdev->device_busy == 0)
1609 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1610out:
1da177e4
LT
1611 /* must be careful here...if we trigger the ->remove() function
1612 * we cannot be holding the q lock */
1613 spin_unlock_irq(q->queue_lock);
1614 put_device(&sdev->sdev_gendev);
1615 spin_lock_irq(q->queue_lock);
1616}
1617
1618u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1619{
1620 struct device *host_dev;
1621 u64 bounce_limit = 0xffffffff;
1622
1623 if (shost->unchecked_isa_dma)
1624 return BLK_BOUNCE_ISA;
1625 /*
1626 * Platforms with virtual-DMA translation
1627 * hardware have no practical limit.
1628 */
1629 if (!PCI_DMA_BUS_IS_PHYS)
1630 return BLK_BOUNCE_ANY;
1631
1632 host_dev = scsi_get_device(shost);
1633 if (host_dev && host_dev->dma_mask)
1634 bounce_limit = *host_dev->dma_mask;
1635
1636 return bounce_limit;
1637}
1638EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1639
b58d9154
FT
1640struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1641 request_fn_proc *request_fn)
1da177e4 1642{
1da177e4 1643 struct request_queue *q;
6f381fa3 1644 struct device *dev = shost->dma_dev;
1da177e4 1645
b58d9154 1646 q = blk_init_queue(request_fn, NULL);
1da177e4
LT
1647 if (!q)
1648 return NULL;
1649
a8474ce2
JA
1650 /*
1651 * this limit is imposed by hardware restrictions
1652 */
8a78362c
MP
1653 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1654 SCSI_MAX_SG_CHAIN_SEGMENTS));
a8474ce2 1655
13f05c8d
MP
1656 if (scsi_host_prot_dma(shost)) {
1657 shost->sg_prot_tablesize =
1658 min_not_zero(shost->sg_prot_tablesize,
1659 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1660 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1661 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1662 }
1663
086fa5ff 1664 blk_queue_max_hw_sectors(q, shost->max_sectors);
1da177e4
LT
1665 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1666 blk_queue_segment_boundary(q, shost->dma_boundary);
99c84dbd 1667 dma_set_seg_boundary(dev, shost->dma_boundary);
1da177e4 1668
860ac568
FT
1669 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1670
1da177e4 1671 if (!shost->use_clustering)
e692cb66 1672 q->limits.cluster = 0;
465ff318
JB
1673
1674 /*
1675 * set a reasonable default alignment on word boundaries: the
1676 * host and device may alter it using
1677 * blk_queue_update_dma_alignment() later.
1678 */
1679 blk_queue_dma_alignment(q, 0x03);
1680
1da177e4
LT
1681 return q;
1682}
b58d9154
FT
1683EXPORT_SYMBOL(__scsi_alloc_queue);
1684
1685struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1686{
1687 struct request_queue *q;
1688
1689 q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1690 if (!q)
1691 return NULL;
1692
1693 blk_queue_prep_rq(q, scsi_prep_fn);
b58d9154 1694 blk_queue_softirq_done(q, scsi_softirq_done);
242f9dcb 1695 blk_queue_rq_timed_out(q, scsi_times_out);
6c5121b7 1696 blk_queue_lld_busy(q, scsi_lld_busy);
b58d9154
FT
1697 return q;
1698}
1da177e4 1699
1da177e4
LT
1700/*
1701 * Function: scsi_block_requests()
1702 *
1703 * Purpose: Utility function used by low-level drivers to prevent further
1704 * commands from being queued to the device.
1705 *
1706 * Arguments: shost - Host in question
1707 *
1708 * Returns: Nothing
1709 *
1710 * Lock status: No locks are assumed held.
1711 *
1712 * Notes: There is no timer nor any other means by which the requests
1713 * get unblocked other than the low-level driver calling
1714 * scsi_unblock_requests().
1715 */
1716void scsi_block_requests(struct Scsi_Host *shost)
1717{
1718 shost->host_self_blocked = 1;
1719}
1720EXPORT_SYMBOL(scsi_block_requests);
1721
1722/*
1723 * Function: scsi_unblock_requests()
1724 *
1725 * Purpose: Utility function used by low-level drivers to allow further
1726 * commands from being queued to the device.
1727 *
1728 * Arguments: shost - Host in question
1729 *
1730 * Returns: Nothing
1731 *
1732 * Lock status: No locks are assumed held.
1733 *
1734 * Notes: There is no timer nor any other means by which the requests
1735 * get unblocked other than the low-level driver calling
1736 * scsi_unblock_requests().
1737 *
1738 * This is done as an API function so that changes to the
1739 * internals of the scsi mid-layer won't require wholesale
1740 * changes to drivers that use this feature.
1741 */
1742void scsi_unblock_requests(struct Scsi_Host *shost)
1743{
1744 shost->host_self_blocked = 0;
1745 scsi_run_host_queues(shost);
1746}
1747EXPORT_SYMBOL(scsi_unblock_requests);
1748
1749int __init scsi_init_queue(void)
1750{
1751 int i;
1752
6362abd3
MP
1753 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1754 sizeof(struct scsi_data_buffer),
1755 0, 0, NULL);
1756 if (!scsi_sdb_cache) {
1757 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
f078727b 1758 return -ENOMEM;
6f9a35e2
BH
1759 }
1760
1da177e4
LT
1761 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1762 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1763 int size = sgp->size * sizeof(struct scatterlist);
1764
1765 sgp->slab = kmem_cache_create(sgp->name, size, 0,
20c2df83 1766 SLAB_HWCACHE_ALIGN, NULL);
1da177e4
LT
1767 if (!sgp->slab) {
1768 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1769 sgp->name);
6362abd3 1770 goto cleanup_sdb;
1da177e4
LT
1771 }
1772
93d2341c
MD
1773 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1774 sgp->slab);
1da177e4
LT
1775 if (!sgp->pool) {
1776 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1777 sgp->name);
6362abd3 1778 goto cleanup_sdb;
1da177e4
LT
1779 }
1780 }
1781
1782 return 0;
3d9dd6ee 1783
6362abd3 1784cleanup_sdb:
3d9dd6ee
FT
1785 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1786 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1787 if (sgp->pool)
1788 mempool_destroy(sgp->pool);
1789 if (sgp->slab)
1790 kmem_cache_destroy(sgp->slab);
1791 }
6362abd3 1792 kmem_cache_destroy(scsi_sdb_cache);
3d9dd6ee
FT
1793
1794 return -ENOMEM;
1da177e4
LT
1795}
1796
1797void scsi_exit_queue(void)
1798{
1799 int i;
1800
6362abd3 1801 kmem_cache_destroy(scsi_sdb_cache);
aa7b5cd7 1802
1da177e4
LT
1803 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1804 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1805 mempool_destroy(sgp->pool);
1806 kmem_cache_destroy(sgp->slab);
1807 }
1808}
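/*
 * Illustrative sketch: the pools initialised above hold scatterlist
 * arrays of 8, 16, 32, ... up to SCSI_MAX_SG_SEGMENTS entries, and an
 * allocation of nents entries is served from the smallest pool that
 * fits. The real lookup (scsi_sgtable_index) lives earlier in this
 * file; this hypothetical copy just shows the mapping.
 */
static inline unsigned int example_sgtable_index(unsigned short nents)
{
	if (nents <= 8)
		return 0;			/* sgpool-8 */
	return get_count_order(nents) - 3;	/* 9-16 -> 1, 17-32 -> 2, ... */
}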
5baba830
JB
1809
1810/**
1811 * scsi_mode_select - issue a mode select
1812 * @sdev: SCSI device to be queried
1813 * @pf: Page format bit (1 == standard, 0 == vendor specific)
1814 * @sp: Save page bit (0 == don't save, 1 == save)
1815 * @modepage: mode page being set
1816 * @buffer: request buffer (may not be smaller than eight bytes)
1817 * @len: length of request buffer.
1818 * @timeout: command timeout
1819 * @retries: number of retries before failing
1820 * @data: returns a structure abstracting the mode header data
eb44820c 1821 * @sshdr: place to put sense data (or NULL if no sense to be collected).
5baba830
JB
1822 * This is a parsed scsi_sense_hdr, not a raw sense buffer.
1823 *
1824 * Returns zero if successful; negative error number or scsi
1825 * status on error
1826 *
1827 */
1828int
1829scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1830 unsigned char *buffer, int len, int timeout, int retries,
1831 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1832{
1833 unsigned char cmd[10];
1834 unsigned char *real_buffer;
1835 int ret;
1836
1837 memset(cmd, 0, sizeof(cmd));
1838 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1839
1840 if (sdev->use_10_for_ms) {
1841 if (len > 65535)
1842 return -EINVAL;
1843 real_buffer = kmalloc(8 + len, GFP_KERNEL);
1844 if (!real_buffer)
1845 return -ENOMEM;
1846 memcpy(real_buffer + 8, buffer, len);
1847 len += 8;
1848 real_buffer[0] = 0;
1849 real_buffer[1] = 0;
1850 real_buffer[2] = data->medium_type;
1851 real_buffer[3] = data->device_specific;
1852 real_buffer[4] = data->longlba ? 0x01 : 0;
1853 real_buffer[5] = 0;
1854 real_buffer[6] = data->block_descriptor_length >> 8;
1855 real_buffer[7] = data->block_descriptor_length;
1856
1857 cmd[0] = MODE_SELECT_10;
1858 cmd[7] = len >> 8;
1859 cmd[8] = len;
1860 } else {
1861 if (len > 255 || data->block_descriptor_length > 255 ||
1862 data->longlba)
1863 return -EINVAL;
1864
1865 real_buffer = kmalloc(4 + len, GFP_KERNEL);
1866 if (!real_buffer)
1867 return -ENOMEM;
1868 memcpy(real_buffer + 4, buffer, len);
1869 len += 4;
1870 real_buffer[0] = 0;
1871 real_buffer[1] = data->medium_type;
1872 real_buffer[2] = data->device_specific;
1873 real_buffer[3] = data->block_descriptor_length;
1874
1875
1876 cmd[0] = MODE_SELECT;
1877 cmd[4] = len;
1878 }
1879
1880 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
f4f4e47e 1881 sshdr, timeout, retries, NULL);
5baba830
JB
1882 kfree(real_buffer);
1883 return ret;
1884}
1885EXPORT_SYMBOL_GPL(scsi_mode_select);
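/*
 * Illustrative sketch of the usual read-modify-write pattern around
 * scsi_mode_select(), loosely modelled on the sd driver's handling of
 * caching page 0x08. The helper name, timeout and retry count are
 * assumptions.
 */
static int example_set_wce(struct scsi_device *sdev, int enable)
{
	unsigned char buffer[64];
	unsigned char *page;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res, len, sp;

	/* DBD (0x08) set: fetch caching page 0x08 without block descriptors */
	res = scsi_mode_sense(sdev, 0x08, 0x08, buffer, sizeof(buffer),
			      30 * HZ, 3, &data, NULL);
	if (!scsi_status_is_good(res))
		return -EIO;

	page = buffer + data.header_length + data.block_descriptor_length;
	len = min_t(size_t, sizeof(buffer), data.length -
		    data.header_length - data.block_descriptor_length);

	sp = (page[0] & 0x80) ? 1 : 0;	/* save the page only if saveable (PS) */
	page[0] &= 0x3f;		/* PS and reserved bits must be zero */
	if (enable)
		page[2] |= 0x04;	/* WCE */
	else
		page[2] &= ~0x04;

	data.device_specific = 0;	/* don't write back e.g. the WP bit */
	res = scsi_mode_select(sdev, 1 /* PF */, sp, 0x08, page, len,
			       30 * HZ, 3, &data, &sshdr);
	return scsi_status_is_good(res) ? 0 : -EIO;
}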
1886
1da177e4 1887/**
eb44820c 1888 * scsi_mode_sense - issue a mode sense, falling back from a ten- to a six-byte command if necessary.
1cf72699 1889 * @sdev: SCSI device to be queried
1da177e4
LT
1890 * @dbd: DBD and LLBAA bits for CDB byte 1; setting DBD (0x08) suppresses block descriptors
1891 * @modepage: mode page being requested
1892 * @buffer: request buffer (may not be smaller than eight bytes)
1893 * @len: length of request buffer.
1894 * @timeout: command timeout
1895 * @retries: number of retries before failing
1896 * @data: returns a structure abstracting the mode header data
eb44820c 1897 * @sshdr: place to put sense data (or NULL if no sense to be collected).
1cf72699 1898 * This is a parsed scsi_sense_hdr, not a raw sense buffer.
1da177e4
LT
1899 *
1900 * Returns the command result: zero if successful. On success @data
1901 * is filled in, with @data->header_length set to the header offset
1902 * (4 if a six-byte command was issued, 8 for a ten-byte command).
eb44820c 1903 */
1da177e4 1904int
1cf72699 1905scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1da177e4 1906 unsigned char *buffer, int len, int timeout, int retries,
5baba830
JB
1907 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1908{
1da177e4
LT
1909 unsigned char cmd[12];
1910 int use_10_for_ms;
1911 int header_length;
1cf72699 1912 int result;
ea73a9f2 1913 struct scsi_sense_hdr my_sshdr;
1da177e4
LT
1914
1915 memset(data, 0, sizeof(*data));
1916 memset(&cmd[0], 0, 12);
1917 cmd[1] = dbd & 0x18; /* allow DBD and LLBAA bits */
1918 cmd[2] = modepage;
1919
ea73a9f2
JB
1920 /* caller might not be interested in sense, but we need it */
1921 if (!sshdr)
1922 sshdr = &my_sshdr;
1923
1da177e4 1924 retry:
1cf72699 1925 use_10_for_ms = sdev->use_10_for_ms;
1da177e4
LT
1926
1927 if (use_10_for_ms) {
1928 if (len < 8)
1929 len = 8;
1930
1931 cmd[0] = MODE_SENSE_10;
1932 cmd[8] = len;
1933 header_length = 8;
1934 } else {
1935 if (len < 4)
1936 len = 4;
1937
1938 cmd[0] = MODE_SENSE;
1939 cmd[4] = len;
1940 header_length = 4;
1941 }
1942
1da177e4
LT
1943 memset(buffer, 0, len);
1944
1cf72699 1945 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
f4f4e47e 1946 sshdr, timeout, retries, NULL);
1da177e4
LT
1947
1948 /* Check whether an ILLEGAL REQUEST sense return identifies the
1949 * command opcode itself as the problem: ASC 0x20 (invalid command
1950 * operation code) means the device does not support MODE SENSE(10),
1951 * so fall back to the six-byte MODE SENSE and retry */
1952
1cf72699
JB
1953 if (use_10_for_ms && !scsi_status_is_good(result) &&
1954 (driver_byte(result) & DRIVER_SENSE)) {
ea73a9f2
JB
1955 if (scsi_sense_valid(sshdr)) {
1956 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1957 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1da177e4
LT
1958 /*
1959 * Invalid command operation code
1960 */
1cf72699 1961 sdev->use_10_for_ms = 0;
1da177e4
LT
1962 goto retry;
1963 }
1964 }
1965 }
1966
1cf72699 1967 if(scsi_status_is_good(result)) {
6d73c851
AV
1968 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1969 (modepage == 6 || modepage == 8))) {
1970 /* Initio breakage? */
1971 header_length = 0;
1972 data->length = 13;
1973 data->medium_type = 0;
1974 data->device_specific = 0;
1975 data->longlba = 0;
1976 data->block_descriptor_length = 0;
1977 } else if(use_10_for_ms) {
1da177e4
LT
1978 data->length = buffer[0]*256 + buffer[1] + 2;
1979 data->medium_type = buffer[2];
1980 data->device_specific = buffer[3];
1981 data->longlba = buffer[4] & 0x01;
1982 data->block_descriptor_length = buffer[6]*256
1983 + buffer[7];
1984 } else {
1985 data->length = buffer[0] + 1;
1986 data->medium_type = buffer[1];
1987 data->device_specific = buffer[2];
1988 data->block_descriptor_length = buffer[3];
1989 }
6d73c851 1990 data->header_length = header_length;
1da177e4
LT
1991 }
1992
1cf72699 1993 return result;
1da177e4
LT
1994}
1995EXPORT_SYMBOL(scsi_mode_sense);
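/*
 * Illustrative sketch: a minimal caller that uses the header data
 * returned above to locate caching page 0x08 and test the WCE bit.
 * The helper name, timeout and retry count are assumptions.
 */
static int example_wce_enabled(struct scsi_device *sdev)
{
	unsigned char buffer[64];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res;

	res = scsi_mode_sense(sdev, 0x08 /* DBD */, 0x08, buffer,
			      sizeof(buffer), 30 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* the page follows the 4- or 8-byte header and any block
	 * descriptors (none should be present with DBD set) */
	return !!(buffer[data.header_length +
			 data.block_descriptor_length + 2] & 0x04);
}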
1996
001aac25
JB
1997/**
1998 * scsi_test_unit_ready - test if unit is ready
1999 * @sdev: scsi device to change the state of.
2000 * @timeout: command timeout
2001 * @retries: number of retries before failing
2002 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
2003 * returning sense. Make sure that this is cleared before passing
2004 * in.
2005 *
2006 * Returns zero if the device is ready or an error result if the TUR failed. For
9f8a2c23 2007 * removable media, UNIT_ATTENTION sets ->changed flag.
001aac25 2008 **/
1da177e4 2009int
001aac25
JB
2010scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2011 struct scsi_sense_hdr *sshdr_external)
1da177e4 2012{
1da177e4
LT
2013 char cmd[] = {
2014 TEST_UNIT_READY, 0, 0, 0, 0, 0,
2015 };
001aac25 2016 struct scsi_sense_hdr *sshdr;
1da177e4 2017 int result;
001aac25
JB
2018
2019 if (!sshdr_external)
2020 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2021 else
2022 sshdr = sshdr_external;
2023
2024 /* try to eat the UNIT_ATTENTION if there are enough retries */
2025 do {
2026 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
f4f4e47e 2027 timeout, retries, NULL);
32c356d7
JB
2028 if (sdev->removable && scsi_sense_valid(sshdr) &&
2029 sshdr->sense_key == UNIT_ATTENTION)
2030 sdev->changed = 1;
2031 } while (scsi_sense_valid(sshdr) &&
2032 sshdr->sense_key == UNIT_ATTENTION && --retries);
001aac25 2033
001aac25
JB
2034 if (!sshdr_external)
2035 kfree(sshdr);
1da177e4
LT
2036 return result;
2037}
2038EXPORT_SYMBOL(scsi_test_unit_ready);
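/*
 * Illustrative sketch: interpreting a TEST UNIT READY result. The
 * helper name, timeout and retry count are assumptions; the UNIT
 * ATTENTION retry loop is already handled above.
 */
static int example_wait_for_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;
	int res;

	memset(&sshdr, 0, sizeof(sshdr));	/* must be cleared beforehand */
	res = scsi_test_unit_ready(sdev, 30 * HZ, 5, &sshdr);
	if (res == 0)
		return 0;			/* device is ready */
	if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY)
		return -EAGAIN;			/* e.g. still spinning up */
	return -EIO;
}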
2039
2040/**
eb44820c 2041 * scsi_device_set_state - Take the given device through the device state model.
1da177e4
LT
2042 * @sdev: scsi device to change the state of.
2043 * @state: state to change to.
2044 *
2045 * Returns zero if successful or -EINVAL if the requested
2046 * transition is illegal.
eb44820c 2047 */
1da177e4
LT
2048int
2049scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2050{
2051 enum scsi_device_state oldstate = sdev->sdev_state;
2052
2053 if (state == oldstate)
2054 return 0;
2055
2056 switch (state) {
2057 case SDEV_CREATED:
6f4267e3
JB
2058 switch (oldstate) {
2059 case SDEV_CREATED_BLOCK:
2060 break;
2061 default:
2062 goto illegal;
2063 }
2064 break;
1da177e4
LT
2065
2066 case SDEV_RUNNING:
2067 switch (oldstate) {
2068 case SDEV_CREATED:
2069 case SDEV_OFFLINE:
1b8d2620 2070 case SDEV_TRANSPORT_OFFLINE:
1da177e4
LT
2071 case SDEV_QUIESCE:
2072 case SDEV_BLOCK:
2073 break;
2074 default:
2075 goto illegal;
2076 }
2077 break;
2078
2079 case SDEV_QUIESCE:
2080 switch (oldstate) {
2081 case SDEV_RUNNING:
2082 case SDEV_OFFLINE:
1b8d2620 2083 case SDEV_TRANSPORT_OFFLINE:
1da177e4
LT
2084 break;
2085 default:
2086 goto illegal;
2087 }
2088 break;
2089
2090 case SDEV_OFFLINE:
1b8d2620 2091 case SDEV_TRANSPORT_OFFLINE:
1da177e4
LT
2092 switch (oldstate) {
2093 case SDEV_CREATED:
2094 case SDEV_RUNNING:
2095 case SDEV_QUIESCE:
2096 case SDEV_BLOCK:
2097 break;
2098 default:
2099 goto illegal;
2100 }
2101 break;
2102
2103 case SDEV_BLOCK:
2104 switch (oldstate) {
1da177e4 2105 case SDEV_RUNNING:
6f4267e3
JB
2106 case SDEV_CREATED_BLOCK:
2107 break;
2108 default:
2109 goto illegal;
2110 }
2111 break;
2112
2113 case SDEV_CREATED_BLOCK:
2114 switch (oldstate) {
2115 case SDEV_CREATED:
1da177e4
LT
2116 break;
2117 default:
2118 goto illegal;
2119 }
2120 break;
2121
2122 case SDEV_CANCEL:
2123 switch (oldstate) {
2124 case SDEV_CREATED:
2125 case SDEV_RUNNING:
9ea72909 2126 case SDEV_QUIESCE:
1da177e4 2127 case SDEV_OFFLINE:
1b8d2620 2128 case SDEV_TRANSPORT_OFFLINE:
1da177e4
LT
2129 case SDEV_BLOCK:
2130 break;
2131 default:
2132 goto illegal;
2133 }
2134 break;
2135
2136 case SDEV_DEL:
2137 switch (oldstate) {
309bd271
BK
2138 case SDEV_CREATED:
2139 case SDEV_RUNNING:
2140 case SDEV_OFFLINE:
1b8d2620 2141 case SDEV_TRANSPORT_OFFLINE:
1da177e4
LT
2142 case SDEV_CANCEL:
2143 break;
2144 default:
2145 goto illegal;
2146 }
2147 break;
2148
2149 }
2150 sdev->sdev_state = state;
2151 return 0;
2152
2153 illegal:
2154 SCSI_LOG_ERROR_RECOVERY(1,
9ccfc756
JB
2155 sdev_printk(KERN_ERR, sdev,
2156 "Illegal state transition %s->%s\n",
2157 scsi_device_state_name(oldstate),
2158 scsi_device_state_name(state))
1da177e4
LT
2159 );
2160 return -EINVAL;
2161}
2162EXPORT_SYMBOL(scsi_device_set_state);
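/*
 * Illustrative sketch: the transition table above is enforced at run
 * time, so callers just attempt a transition and check the result;
 * the helper name is hypothetical.
 */
static int example_offline_device(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_OFFLINE);

	if (err)	/* -EINVAL: illegal from the current state */
		sdev_printk(KERN_WARNING, sdev,
			    "cannot offline device in state %s\n",
			    scsi_device_state_name(sdev->sdev_state));
	return err;
}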
2163
a341cd0f
JG
2164/**
2165 * scsi_evt_emit - emit a single SCSI device uevent
2166 * @sdev: associated SCSI device
2167 * @evt: event to emit
2168 *
2169 * Send a single uevent (scsi_event) to the associated scsi_device.
2170 */
2171static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2172{
2173 int idx = 0;
2174 char *envp[3];
2175
2176 switch (evt->evt_type) {
2177 case SDEV_EVT_MEDIA_CHANGE:
2178 envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2179 break;
2180
2181 default:
2182 /* do nothing */
2183 break;
2184 }
2185
2186 envp[idx++] = NULL;
2187
2188 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2189}
2190
2191/**
2192 * scsi_evt_thread - send a uevent for each queued scsi event
2193 * @work: work struct for scsi_device
2194 *
2195 * Dispatch queued events to their associated scsi_device kobjects
2196 * as uevents.
2197 */
2198void scsi_evt_thread(struct work_struct *work)
2199{
2200 struct scsi_device *sdev;
2201 LIST_HEAD(event_list);
2202
2203 sdev = container_of(work, struct scsi_device, event_work);
2204
2205 while (1) {
2206 struct scsi_event *evt;
2207 struct list_head *this, *tmp;
2208 unsigned long flags;
2209
2210 spin_lock_irqsave(&sdev->list_lock, flags);
2211 list_splice_init(&sdev->event_list, &event_list);
2212 spin_unlock_irqrestore(&sdev->list_lock, flags);
2213
2214 if (list_empty(&event_list))
2215 break;
2216
2217 list_for_each_safe(this, tmp, &event_list) {
2218 evt = list_entry(this, struct scsi_event, node);
2219 list_del(&evt->node);
2220 scsi_evt_emit(sdev, evt);
2221 kfree(evt);
2222 }
2223 }
2224}
2225
2226/**
2227 * sdev_evt_send - send asserted event to uevent thread
2228 * @sdev: scsi_device event occurred on
2229 * @evt: event to send
2230 *
2231 * Assert scsi device event asynchronously.
2232 */
2233void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2234{
2235 unsigned long flags;
2236
4d1566ed
KS
2237#if 0
2238 /* FIXME: currently this check eliminates all media change events
2239 * for polled devices. Need to update to discriminate between AN
2240 * and polled events */
a341cd0f
JG
2241 if (!test_bit(evt->evt_type, sdev->supported_events)) {
2242 kfree(evt);
2243 return;
2244 }
4d1566ed 2245#endif
a341cd0f
JG
2246
2247 spin_lock_irqsave(&sdev->list_lock, flags);
2248 list_add_tail(&evt->node, &sdev->event_list);
2249 schedule_work(&sdev->event_work);
2250 spin_unlock_irqrestore(&sdev->list_lock, flags);
2251}
2252EXPORT_SYMBOL_GPL(sdev_evt_send);
2253
2254/**
2255 * sdev_evt_alloc - allocate a new scsi event
2256 * @evt_type: type of event to allocate
2257 * @gfpflags: GFP flags for allocation
2258 *
2259 * Allocates and returns a new scsi_event.
2260 */
2261struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2262 gfp_t gfpflags)
2263{
2264 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2265 if (!evt)
2266 return NULL;
2267
2268 evt->evt_type = evt_type;
2269 INIT_LIST_HEAD(&evt->node);
2270
2271 /* evt_type-specific initialization, if any */
2272 switch (evt_type) {
2273 case SDEV_EVT_MEDIA_CHANGE:
2274 default:
2275 /* do nothing */
2276 break;
2277 }
2278
2279 return evt;
2280}
2281EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2282
2283/**
2284 * sdev_evt_send_simple - send asserted event to uevent thread
2285 * @sdev: scsi_device event occurred on
2286 * @evt_type: type of event to send
2287 * @gfpflags: GFP flags for allocation
2288 *
2289 * Assert scsi device event asynchronously, given an event type.
2290 */
2291void sdev_evt_send_simple(struct scsi_device *sdev,
2292 enum scsi_device_event evt_type, gfp_t gfpflags)
2293{
2294 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2295 if (!evt) {
2296 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2297 evt_type);
2298 return;
2299 }
2300
2301 sdev_evt_send(sdev, evt);
2302}
2303EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
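/*
 * Illustrative sketch: a driver that detects a media change
 * asynchronously surfaces it to udev with one call; SDEV_EVT_MEDIA_CHANGE
 * is the one event type scsi_evt_emit() currently translates.
 */
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
}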
2304
1da177e4
LT
2305/**
2306 * scsi_device_quiesce - Block user issued commands.
2307 * @sdev: scsi device to quiesce.
2308 *
2309 * This works by trying to transition to the SDEV_QUIESCE state
2310 * (which must be a legal transition). When the device is in this
2311 * state, only special requests will be accepted; all others will
2312 * be deferred. Since special requests may also be requeued requests,
2313 * a successful return doesn't guarantee the device will be
2314 * totally quiescent.
2315 *
2316 * Must be called with user context, may sleep.
2317 *
2318 * Returns zero if successful or an error if not.
eb44820c 2319 */
1da177e4
LT
2320int
2321scsi_device_quiesce(struct scsi_device *sdev)
2322{
2323 int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2324 if (err)
2325 return err;
2326
2327 scsi_run_queue(sdev->request_queue);
2328 while (sdev->device_busy) {
2329 msleep_interruptible(200);
2330 scsi_run_queue(sdev->request_queue);
2331 }
2332 return 0;
2333}
2334EXPORT_SYMBOL(scsi_device_quiesce);
2335
2336/**
2337 * scsi_device_resume - Restart user issued commands to a quiesced device.
2338 * @sdev: scsi device to resume.
2339 *
2340 * Moves the device from quiesced back to running and restarts the
2341 * queues.
2342 *
2343 * Must be called with user context, may sleep.
eb44820c 2344 */
a7a20d10 2345void scsi_device_resume(struct scsi_device *sdev)
1da177e4 2346{
a7a20d10
DW
2347 /* check if the device state was mutated prior to resume, and if
2348 * so assume the state is being managed elsewhere (for example
2349 * device deleted during suspend)
2350 */
2351 if (sdev->sdev_state != SDEV_QUIESCE ||
2352 scsi_device_set_state(sdev, SDEV_RUNNING))
1da177e4
LT
2353 return;
2354 scsi_run_queue(sdev->request_queue);
2355}
2356EXPORT_SYMBOL(scsi_device_resume);
2357
2358static void
2359device_quiesce_fn(struct scsi_device *sdev, void *data)
2360{
2361 scsi_device_quiesce(sdev);
2362}
2363
2364void
2365scsi_target_quiesce(struct scsi_target *starget)
2366{
2367 starget_for_each_device(starget, NULL, device_quiesce_fn);
2368}
2369EXPORT_SYMBOL(scsi_target_quiesce);
2370
2371static void
2372device_resume_fn(struct scsi_device *sdev, void *data)
2373{
2374 scsi_device_resume(sdev);
2375}
2376
2377void
2378scsi_target_resume(struct scsi_target *starget)
2379{
2380 starget_for_each_device(starget, NULL, device_resume_fn);
2381}
2382EXPORT_SYMBOL(scsi_target_resume);
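/*
 * Illustrative sketch: quiesce/resume bracket a maintenance window in
 * which no user I/O reaches the target (the SPI transport does this
 * around domain validation). The retune routine is a hypothetical
 * stand-in and may sleep.
 */
static void example_retune_target(struct scsi_target *starget);	/* hypothetical */

static void example_target_maintenance(struct scsi_target *starget)
{
	scsi_target_quiesce(starget);
	example_retune_target(starget);
	scsi_target_resume(starget);
}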
2383
2384/**
eb44820c 2385 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
1da177e4
LT
2386 * @sdev: device to block
2387 *
2388 * Block request made by scsi LLDs to temporarily stop all
2389 * scsi commands on the specified device. Called from interrupt
2390 * or normal process context.
2391 *
2392 * Returns zero if successful or error if not
2393 *
2394 * Notes:
2395 * This routine transitions the device to the SDEV_BLOCK state
2396 * (which must be a legal transition). When the device is in this
2397 * state, all commands are deferred until the scsi lld reenables
2398 * the device with scsi_internal_device_unblock() or device_block_tmo fires.
eb44820c 2399 */
1da177e4
LT
2400int
2401scsi_internal_device_block(struct scsi_device *sdev)
2402{
165125e1 2403 struct request_queue *q = sdev->request_queue;
1da177e4
LT
2404 unsigned long flags;
2405 int err = 0;
2406
2407 err = scsi_device_set_state(sdev, SDEV_BLOCK);
6f4267e3
JB
2408 if (err) {
2409 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2410
2411 if (err)
2412 return err;
2413 }
1da177e4
LT
2414
2415 /*
2416 * The device has transitioned to SDEV_BLOCK. Stop the
2417 * block layer from calling the midlayer with this device's
2418 * request queue.
2419 */
2420 spin_lock_irqsave(q->queue_lock, flags);
2421 blk_stop_queue(q);
2422 spin_unlock_irqrestore(q->queue_lock, flags);
2423
2424 return 0;
2425}
2426EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2427
2428/**
2429 * scsi_internal_device_unblock - resume a device after a block request
2430 * @sdev: device to resume
5d9fb5cc 2431 * @new_state: state to set devices to after unblocking
1da177e4
LT
2432 *
2433 * Called by scsi LLDs or the midlayer to restart the device queue
2434 * for the previously suspended scsi device. Called from interrupt or
2435 * normal process context.
2436 *
2437 * Returns zero if successful or error if not.
2438 *
2439 * Notes:
2440 * This routine transitions the device to the SDEV_RUNNING state
5d9fb5cc 2441 * or to one of the offline states (which must be a legal transition)
d075498c 2442 * allowing the midlayer to goose the queue for this device.
eb44820c 2443 */
1da177e4 2444int
5d9fb5cc
MC
2445scsi_internal_device_unblock(struct scsi_device *sdev,
2446 enum scsi_device_state new_state)
1da177e4 2447{
165125e1 2448 struct request_queue *q = sdev->request_queue;
1da177e4 2449 unsigned long flags;
5d9fb5cc
MC
2450
2451 /*
2452 * Try to transition the scsi device to SDEV_RUNNING or one of the
2453 * offlined states and goose the device queue if successful.
1da177e4 2454 */
5c10e63c 2455 if (sdev->sdev_state == SDEV_BLOCK)
5d9fb5cc
MC
2456 sdev->sdev_state = new_state;
2457 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
2458 if (new_state == SDEV_TRANSPORT_OFFLINE ||
2459 new_state == SDEV_OFFLINE)
2460 sdev->sdev_state = new_state;
2461 else
2462 sdev->sdev_state = SDEV_CREATED;
2463 } else if (sdev->sdev_state != SDEV_CANCEL &&
986fe6c7 2464 sdev->sdev_state != SDEV_OFFLINE)
5c10e63c 2465 return -EINVAL;
1da177e4
LT
2466
2467 spin_lock_irqsave(q->queue_lock, flags);
2468 blk_start_queue(q);
2469 spin_unlock_irqrestore(q->queue_lock, flags);
2470
2471 return 0;
2472}
2473EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2474
2475static void
2476device_block(struct scsi_device *sdev, void *data)
2477{
2478 scsi_internal_device_block(sdev);
2479}
2480
2481static int
2482target_block(struct device *dev, void *data)
2483{
2484 if (scsi_is_target_device(dev))
2485 starget_for_each_device(to_scsi_target(dev), NULL,
2486 device_block);
2487 return 0;
2488}
2489
2490void
2491scsi_target_block(struct device *dev)
2492{
2493 if (scsi_is_target_device(dev))
2494 starget_for_each_device(to_scsi_target(dev), NULL,
2495 device_block);
2496 else
2497 device_for_each_child(dev, NULL, target_block);
2498}
2499EXPORT_SYMBOL_GPL(scsi_target_block);
2500
2501static void
2502device_unblock(struct scsi_device *sdev, void *data)
2503{
5d9fb5cc 2504 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
1da177e4
LT
2505}
2506
2507static int
2508target_unblock(struct device *dev, void *data)
2509{
2510 if (scsi_is_target_device(dev))
5d9fb5cc 2511 starget_for_each_device(to_scsi_target(dev), data,
1da177e4
LT
2512 device_unblock);
2513 return 0;
2514}
2515
2516void
5d9fb5cc 2517scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
1da177e4
LT
2518{
2519 if (scsi_is_target_device(dev))
5d9fb5cc 2520 starget_for_each_device(to_scsi_target(dev), &new_state,
1da177e4
LT
2521 device_unblock);
2522 else
5d9fb5cc 2523 device_for_each_child(dev, &new_state, target_unblock);
1da177e4
LT
2524}
2525EXPORT_SYMBOL_GPL(scsi_target_unblock);
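/*
 * Illustrative sketch: a transport parks I/O across a temporary link
 * loss and afterwards either resumes the target or marks it offline,
 * much as the FC transport does when its device-loss timer fires. The
 * helper and its came_back flag are hypothetical.
 */
static void example_handle_link_bounce(struct device *target_dev, bool came_back)
{
	scsi_target_block(target_dev);		/* devices enter SDEV_BLOCK */

	/* ... wait for the link to settle ... */

	scsi_target_unblock(target_dev, came_back ?
			    SDEV_RUNNING : SDEV_TRANSPORT_OFFLINE);
}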
cdb8c2a6
GL
2526
2527/**
2528 * scsi_kmap_atomic_sg - find and atomically map an sg-element
eb44820c 2529 * @sgl: scatter-gather list
cdb8c2a6
GL
2530 * @sg_count: number of segments in sg
2531 * @offset: offset in bytes into sg, on return offset into the mapped area
2532 * @len: bytes to map, on return number of bytes mapped
2533 *
2534 * Returns virtual address of the start of the mapped page
2535 */
c6132da1 2536void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
cdb8c2a6
GL
2537 size_t *offset, size_t *len)
2538{
2539 int i;
2540 size_t sg_len = 0, len_complete = 0;
c6132da1 2541 struct scatterlist *sg;
cdb8c2a6
GL
2542 struct page *page;
2543
22cfefb5
AM
2544 WARN_ON(!irqs_disabled());
2545
c6132da1 2546 for_each_sg(sgl, sg, sg_count, i) {
cdb8c2a6 2547 len_complete = sg_len; /* Complete sg-entries */
c6132da1 2548 sg_len += sg->length;
cdb8c2a6
GL
2549 if (sg_len > *offset)
2550 break;
2551 }
2552
2553 if (unlikely(i == sg_count)) {
169e1a2a
AM
2554 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2555 "elements %d\n",
cadbd4a5 2556 __func__, sg_len, *offset, sg_count);
cdb8c2a6
GL
2557 WARN_ON(1);
2558 return NULL;
2559 }
2560
2561 /* Offset starting from the beginning of first page in this sg-entry */
c6132da1 2562 *offset = *offset - len_complete + sg->offset;
cdb8c2a6
GL
2563
2564 /* Assumption: contiguous pages can be accessed as "page + i" */
45711f1a 2565 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
cdb8c2a6
GL
2566 *offset &= ~PAGE_MASK;
2567
2568 /* Bytes in this sg-entry from *offset to the end of the page */
2569 sg_len = PAGE_SIZE - *offset;
2570 if (*len > sg_len)
2571 *len = sg_len;
2572
77dfce07 2573 return kmap_atomic(page);
cdb8c2a6
GL
2574}
2575EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2576
2577/**
eb44820c 2578 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
cdb8c2a6
GL
2579 * @virt: virtual address to be unmapped
2580 */
2581void scsi_kunmap_atomic_sg(void *virt)
2582{
77dfce07 2583 kunmap_atomic(virt);
cdb8c2a6
GL
2584}
2585EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
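/*
 * Illustrative sketch: draining a command's scatter-gather list through
 * the atomic kmap helpers above. Interrupts must be disabled across
 * each mapping (the helper WARNs otherwise); the helper name is an
 * assumption.
 */
static size_t example_sg_copy_to_buffer(struct scsi_cmnd *cmd,
					void *buf, size_t buflen)
{
	size_t copied = 0;
	unsigned long flags;

	local_irq_save(flags);
	while (copied < buflen) {
		size_t offset = copied;		/* in: byte offset into the sg list */
		size_t len = buflen - copied;	/* in: bytes wanted */
		char *vaddr;

		vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd),
					    scsi_sg_count(cmd),
					    &offset, &len);
		if (!vaddr)
			break;
		/* out: offset is now relative to the mapping and len is
		 * clamped to what this single page mapping covers */
		memcpy((char *)buf + copied, vaddr + offset, len);
		scsi_kunmap_atomic_sg(vaddr);
		copied += len;
	}
	local_irq_restore(flags);

	return copied;
}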