1/*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 * Copyright (C) 2002, 2003 Christoph Hellwig
5 *
6 * generic mid-level SCSI driver
7 * Initial versions: Drew Eckhardt
8 * Subsequent revisions: Eric Youngdale
9 *
10 * <drew@colorado.edu>
11 *
12 * Bug correction thanks go to :
13 * Rik Faith <faith@cs.unc.edu>
14 * Tommy Thorn <tthorn>
15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 *
17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
18 * add scatter-gather, multiple outstanding request, and other
19 * enhancements.
20 *
21 * Native multichannel, wide scsi, /proc/scsi and hot plugging
22 * support added by Michael Neuffer <mike@i-connect.net>
23 *
24 * Added request_module("scsi_hostadapter") for kerneld:
25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
26 * Bjorn Ekwall <bj0rn@blox.se>
27 * (changed to kmod)
28 *
29 * Major improvements to the timeout, abort, and reset processing,
30 * as well as performance modifications for large queue depths by
31 * Leonard N. Zubkoff <lnz@dandelion.com>
32 *
33 * Converted cli() code to spinlocks, Ingo Molnar
34 *
35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 *
37 * out_of_space hacks, D. Gilbert (dpg) 990608
38 */
39
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42#include <linux/kernel.h>
43#include <linux/timer.h>
44#include <linux/string.h>
45#include <linux/slab.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h>
48#include <linux/init.h>
49#include <linux/completion.h>
50#include <linux/unistd.h>
51#include <linux/spinlock.h>
52#include <linux/kmod.h>
53#include <linux/interrupt.h>
54#include <linux/notifier.h>
55#include <linux/cpu.h>
56#include <linux/mutex.h>
57
58#include <scsi/scsi.h>
59#include <scsi/scsi_cmnd.h>
60#include <scsi/scsi_dbg.h>
61#include <scsi/scsi_device.h>
62#include <scsi/scsi_driver.h>
63#include <scsi/scsi_eh.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi_tcq.h>
66
67#include "scsi_priv.h"
68#include "scsi_logging.h"
69
70static void scsi_done(struct scsi_cmnd *cmd);
71
72/*
73 * Definitions and constants.
74 */
75
76#define MIN_RESET_DELAY (2*HZ)
77
78/* Do not call reset on error if we just did a reset within 15 sec. */
79#define MIN_RESET_PERIOD (15*HZ)
80
81/*
82 * Macro to determine the size of SCSI command. This macro takes vendor
83 * unique commands into account. SCSI commands in groups 6 and 7 are
84 * vendor unique and we will depend upon the command length being
85 * supplied correctly in cmd_len.
86 */
87#define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
88 COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
89
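/*
 * Illustrative sketch: how CDB_SIZE() resolves for a standard opcode versus
 * a vendor-unique one.  The opcode group is bits 7..5 of byte 0; groups 6
 * and 7 fall back to the cmd_len supplied by the submitter.  The opcode
 * values below are examples only.
 */
#if 0	/* example only, not compiled */
static void cdb_size_example(struct scsi_cmnd *cmd)
{
	cmd->cmnd[0] = READ_6;			/* group 0 */
	cmd->cmd_len = COMMAND_SIZE(READ_6);	/* 6 bytes */
	BUG_ON(CDB_SIZE(cmd) != 6);

	cmd->cmnd[0] = 0xc1;			/* group 6: vendor unique */
	cmd->cmd_len = 10;			/* caller supplies the length */
	BUG_ON(CDB_SIZE(cmd) != 10);
}
#endif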
90/*
91 * Note - the initial logging level can be set here to log events at boot time.
92 * After the system is up, you may enable logging via the /proc interface.
93 */
94unsigned int scsi_logging_level;
95#if defined(CONFIG_SCSI_LOGGING)
96EXPORT_SYMBOL(scsi_logging_level);
97#endif
98
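/*
 * Illustrative sketch: scsi_logging_level is a packed bit-field, with each
 * logging area occupying SCSI_LOG_*_BITS bits starting at SCSI_LOG_*_SHIFT.
 * Raising the ML QUEUE area to level 3 at run time could look like this
 * (normally the whole word is written via the /proc or sysfs interface).
 */
#if 0	/* example only, not compiled */
static void set_mlqueue_logging_example(void)
{
	scsi_logging_level &= ~(((1 << SCSI_LOG_MLQUEUE_BITS) - 1)
						<< SCSI_LOG_MLQUEUE_SHIFT);
	scsi_logging_level |= 3 << SCSI_LOG_MLQUEUE_SHIFT;
}
#endif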
99/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
100 * You may not alter any existing entry (although adding new ones is
101 * encouraged once assigned by ANSI/INCITS T10).
102 */
103static const char *const scsi_device_types[] = {
104 "Direct-Access ",
105 "Sequential-Access",
106 "Printer ",
107 "Processor ",
108 "WORM ",
109 "CD-ROM ",
110 "Scanner ",
111 "Optical Device ",
112 "Medium Changer ",
113 "Communications ",
114 "ASC IT8 ",
115 "ASC IT8 ",
116 "RAID ",
117 "Enclosure ",
118 "Direct-Access-RBC",
119 "Optical card ",
120 "Bridge controller",
121 "Object storage ",
122 "Automation/Drive ",
123};
124
125/**
126 * scsi_device_type - Return 17 char string indicating device type.
127 * @type: type number to look up
128 */
129
130const char * scsi_device_type(unsigned type)
131{
132 if (type == 0x1e)
133 return "Well-known LUN ";
134 if (type == 0x1f)
135 return "No Device ";
136 if (type >= ARRAY_SIZE(scsi_device_types))
137 return "Unknown ";
138 return scsi_device_types[type];
139}
140
141EXPORT_SYMBOL(scsi_device_type);
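/*
 * Illustrative sketch: callers normally pass the peripheral device type from
 * INQUIRY byte 0, i.e. only the low five bits.
 */
#if 0	/* example only, not compiled */
static void print_inquiry_type_example(const unsigned char *inq_result)
{
	printk(KERN_INFO "device type: %s\n",
	       scsi_device_type(inq_result[0] & 0x1f));
}
#endif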
142
143struct scsi_host_cmd_pool {
144 struct kmem_cache *cmd_slab;
145 struct kmem_cache *sense_slab;
146 unsigned int users;
147 char *cmd_name;
148 char *sense_name;
149 unsigned int slab_flags;
150 gfp_t gfp_mask;
151};
152
153static struct scsi_host_cmd_pool scsi_cmd_pool = {
154 .cmd_name = "scsi_cmd_cache",
155 .sense_name = "scsi_sense_cache",
156 .slab_flags = SLAB_HWCACHE_ALIGN,
157};
158
159static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
160 .cmd_name = "scsi_cmd_cache(DMA)",
161 .sense_name = "scsi_sense_cache(DMA)",
162 .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
163 .gfp_mask = __GFP_DMA,
164};
165
166static DEFINE_MUTEX(host_cmd_pool_mutex);
167
168/**
169 * scsi_pool_alloc_command - internal function to get a fully allocated command
170 * @pool: slab pool to allocate the command from
171 * @gfp_mask: mask for the allocation
172 *
173 * Returns a fully allocated command (with the allied sense buffer) or
174 * NULL on failure
175 */
176static struct scsi_cmnd *
177scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
178{
179 struct scsi_cmnd *cmd;
180
181 cmd = kmem_cache_alloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
182 if (!cmd)
183 return NULL;
184
185 memset(cmd, 0, sizeof(*cmd));
186
187 cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
188 gfp_mask | pool->gfp_mask);
189 if (!cmd->sense_buffer) {
190 kmem_cache_free(pool->cmd_slab, cmd);
191 return NULL;
192 }
193
194 return cmd;
195}
196
197/**
198 * scsi_pool_free_command - internal function to release a command
199 * @pool: slab pool to allocate the command from
200 * @cmd: command to release
201 *
202 * the command must previously have been allocated by
203 * scsi_pool_alloc_command.
204 */
205static void
206scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
207 struct scsi_cmnd *cmd)
208{
209 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
210 kmem_cache_free(pool->cmd_slab, cmd);
211}
212
213/**
214 * __scsi_get_command - Allocate a struct scsi_cmnd
215 * @shost: host to transmit command
216 * @gfp_mask: allocation mask
217 *
218 * Description: allocate a struct scsi_cmd from host's slab, recycling from the
219 * host's free_list if necessary.
220 */
221struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
222{
223 struct scsi_cmnd *cmd;
224 unsigned char *buf;
225
226 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
227
228 if (unlikely(!cmd)) {
229 unsigned long flags;
230
231 spin_lock_irqsave(&shost->free_list_lock, flags);
232 if (likely(!list_empty(&shost->free_list))) {
233 cmd = list_entry(shost->free_list.next,
234 struct scsi_cmnd, list);
235 list_del_init(&cmd->list);
236 }
237 spin_unlock_irqrestore(&shost->free_list_lock, flags);
238
239 if (cmd) {
240 buf = cmd->sense_buffer;
241 memset(cmd, 0, sizeof(*cmd));
242 cmd->sense_buffer = buf;
243 }
244 }
245
246 return cmd;
247}
248EXPORT_SYMBOL_GPL(__scsi_get_command);
249
250/**
251 * scsi_get_command - Allocate and setup a scsi command block
252 * @dev: parent scsi device
253 * @gfp_mask: allocator flags
254 *
255 * Returns: The allocated scsi command structure.
256 */
257struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
258{
259 struct scsi_cmnd *cmd;
260
261 /* Bail if we can't get a reference to the device */
262 if (!get_device(&dev->sdev_gendev))
263 return NULL;
264
265 cmd = __scsi_get_command(dev->host, gfp_mask);
266
267 if (likely(cmd != NULL)) {
268 unsigned long flags;
269
270 cmd->device = dev;
271 init_timer(&cmd->eh_timeout);
272 INIT_LIST_HEAD(&cmd->list);
273 spin_lock_irqsave(&dev->list_lock, flags);
274 list_add_tail(&cmd->list, &dev->cmd_list);
275 spin_unlock_irqrestore(&dev->list_lock, flags);
276 cmd->jiffies_at_alloc = jiffies;
277 } else
278 put_device(&dev->sdev_gendev);
279
280 return cmd;
281}
282EXPORT_SYMBOL(scsi_get_command);
283
284/**
285 * __scsi_put_command - Free a struct scsi_cmnd
286 * @shost: dev->host
287 * @cmd: Command to free
288 * @dev: parent scsi device
289 */
290void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
291 struct device *dev)
292{
293 unsigned long flags;
294
295 /* changing locks here, don't need to restore the irq state */
296 spin_lock_irqsave(&shost->free_list_lock, flags);
297 if (unlikely(list_empty(&shost->free_list))) {
298 list_add(&cmd->list, &shost->free_list);
299 cmd = NULL;
300 }
301 spin_unlock_irqrestore(&shost->free_list_lock, flags);
302
303 if (likely(cmd != NULL))
304 scsi_pool_free_command(shost->cmd_pool, cmd);
305
306 put_device(dev);
307}
308EXPORT_SYMBOL(__scsi_put_command);
309
310/**
311 * scsi_put_command - Free a scsi command block
312 * @cmd: command block to free
313 *
314 * Returns: Nothing.
315 *
316 * Notes: The command must not belong to any lists.
317 */
318void scsi_put_command(struct scsi_cmnd *cmd)
319{
320 struct scsi_device *sdev = cmd->device;
321 unsigned long flags;
322
323 /* serious error if the command hasn't come from a device list */
324 spin_lock_irqsave(&cmd->device->list_lock, flags);
325 BUG_ON(list_empty(&cmd->list));
326 list_del_init(&cmd->list);
327 spin_unlock_irqrestore(&cmd->device->list_lock, flags);
328
329 __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
330}
331EXPORT_SYMBOL(scsi_put_command);
332
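/*
 * Illustrative sketch: the normal life cycle of a command obtained through
 * this interface.  scsi_get_command() takes a device reference and links the
 * command into sdev->cmd_list; scsi_put_command() undoes both.
 */
#if 0	/* example only, not compiled */
static int cmd_lifecycle_example(struct scsi_device *sdev)
{
	struct scsi_cmnd *cmd = scsi_get_command(sdev, GFP_KERNEL);

	if (!cmd)
		return -ENOMEM;
	/* ... fill in cmd->cmnd, data buffers and timeout, then issue it ... */
	scsi_put_command(cmd);
	return 0;
}
#endif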
333/**
334 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
335 * @shost: host to allocate the freelist for.
336 *
337 * Description: The command freelist protects against system-wide out of memory
338 * deadlock by preallocating one SCSI command structure for each host, so the
339 * system can always write to a swap file on a device associated with that host.
340 *
341 * Returns: Nothing.
342 */
343int scsi_setup_command_freelist(struct Scsi_Host *shost)
344{
345 struct scsi_host_cmd_pool *pool;
346 struct scsi_cmnd *cmd;
347
348 spin_lock_init(&shost->free_list_lock);
349 INIT_LIST_HEAD(&shost->free_list);
350
351 /*
352 * Select a command slab for this host and create it if it does
353 * not yet exist.
354 */
355 mutex_lock(&host_cmd_pool_mutex);
356 pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
357 if (!pool->users) {
358 pool->cmd_slab = kmem_cache_create(pool->cmd_name,
359 sizeof(struct scsi_cmnd), 0,
360 pool->slab_flags, NULL);
361 if (!pool->cmd_slab)
362 goto fail;
363
364 pool->sense_slab = kmem_cache_create(pool->sense_name,
365 SCSI_SENSE_BUFFERSIZE, 0,
366 pool->slab_flags, NULL);
367 if (!pool->sense_slab) {
368 kmem_cache_destroy(pool->cmd_slab);
369 goto fail;
370 }
371 }
372
373 pool->users++;
374 shost->cmd_pool = pool;
375 mutex_unlock(&host_cmd_pool_mutex);
376
377 /*
378 * Get one backup command for this host.
379 */
380 cmd = scsi_pool_alloc_command(shost->cmd_pool, GFP_KERNEL);
381 if (!cmd)
382 goto fail2;
383
384 list_add(&cmd->list, &shost->free_list);
385 return 0;
386
387 fail2:
388 mutex_lock(&host_cmd_pool_mutex);
389 if (!--pool->users) {
390 kmem_cache_destroy(pool->cmd_slab);
391 kmem_cache_destroy(pool->sense_slab);
392 }
393 fail:
394 mutex_unlock(&host_cmd_pool_mutex);
395 return -ENOMEM;
396}
397
398/**
399 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
400 * @shost: host whose freelist is going to be destroyed
401 */
402void scsi_destroy_command_freelist(struct Scsi_Host *shost)
403{
404 while (!list_empty(&shost->free_list)) {
405 struct scsi_cmnd *cmd;
406
407 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
408 list_del_init(&cmd->list);
409 scsi_pool_free_command(shost->cmd_pool, cmd);
410 }
411
412 mutex_lock(&host_cmd_pool_mutex);
413 if (!--shost->cmd_pool->users) {
414 kmem_cache_destroy(shost->cmd_pool->cmd_slab);
415 kmem_cache_destroy(shost->cmd_pool->sense_slab);
416 }
417 mutex_unlock(&host_cmd_pool_mutex);
418}
419
420#ifdef CONFIG_SCSI_LOGGING
421void scsi_log_send(struct scsi_cmnd *cmd)
422{
423 unsigned int level;
424
425 /*
426 * If ML QUEUE log level is greater than or equal to:
427 *
428 * 1: nothing (match completion)
429 *
430 * 2: log opcode + command of all commands
431 *
432 * 3: same as 2 plus dump cmd address
433 *
434 * 4: same as 3 plus dump extra junk
435 */
436 if (unlikely(scsi_logging_level)) {
437 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
438 SCSI_LOG_MLQUEUE_BITS);
439 if (level > 1) {
440 scmd_printk(KERN_INFO, cmd, "Send: ");
441 if (level > 2)
442 printk("0x%p ", cmd);
443 printk("\n");
444 scsi_print_command(cmd);
445 if (level > 3) {
446 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
447 " queuecommand 0x%p\n",
448 scsi_sglist(cmd), scsi_bufflen(cmd),
449 cmd->device->host->hostt->queuecommand);
450
451 }
452 }
453 }
454}
455
456void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
457{
458 unsigned int level;
459
460 /*
461 * If ML COMPLETE log level is greater than or equal to:
462 *
463 * 1: log disposition, result, opcode + command, and conditionally
464 * sense data for failures or non SUCCESS dispositions.
465 *
466 * 2: same as 1 but for all command completions.
467 *
468 * 3: same as 2 plus dump cmd address
469 *
470 * 4: same as 3 plus dump extra junk
471 */
472 if (unlikely(scsi_logging_level)) {
473 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
474 SCSI_LOG_MLCOMPLETE_BITS);
475 if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
476 (level > 1)) {
477 scmd_printk(KERN_INFO, cmd, "Done: ");
478 if (level > 2)
479 printk("0x%p ", cmd);
480 /*
481 * Dump truncated values, so we usually fit within
482 * 80 chars.
483 */
484 switch (disposition) {
485 case SUCCESS:
486 printk("SUCCESS\n");
487 break;
488 case NEEDS_RETRY:
489 printk("RETRY\n");
490 break;
491 case ADD_TO_MLQUEUE:
492 printk("MLQUEUE\n");
493 break;
494 case FAILED:
495 printk("FAILED\n");
496 break;
497 case TIMEOUT_ERROR:
498 /*
499 * If called via scsi_times_out.
500 */
501 printk("TIMEOUT\n");
502 break;
503 default:
504 printk("UNKNOWN\n");
505 }
506 scsi_print_result(cmd);
507 scsi_print_command(cmd);
508 if (status_byte(cmd->result) & CHECK_CONDITION)
509 scsi_print_sense("", cmd);
510 if (level > 3)
511 scmd_printk(KERN_INFO, cmd,
512 "scsi host busy %d failed %d\n",
513 cmd->device->host->host_busy,
514 cmd->device->host->host_failed);
515 }
516 }
517}
518#endif
519
520/**
521 * scsi_cmd_get_serial - Assign a serial number to a command
522 * @host: the scsi host
523 * @cmd: command to assign serial number to
524 *
525 * Description: a serial number identifies a request for error recovery
526 * and debugging purposes. Protected by the Host_Lock of host.
527 */
528static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
529{
530 cmd->serial_number = host->cmd_serial_number++;
531 if (cmd->serial_number == 0)
532 cmd->serial_number = host->cmd_serial_number++;
533}
534
535/**
536 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
537 * @cmd: command block we are dispatching.
538 *
539 * Return: nonzero if the request was rejected and the device's queue needs
540 * to be plugged.
541 */
542int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
543{
544 struct Scsi_Host *host = cmd->device->host;
545 unsigned long flags = 0;
546 unsigned long timeout;
547 int rtn = 0;
548
549 /* check if the device is still usable */
550 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
551 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
552 * returns an immediate error upwards, and signals
553 * that the device is no longer present */
554 cmd->result = DID_NO_CONNECT << 16;
555 atomic_inc(&cmd->device->iorequest_cnt);
556 __scsi_done(cmd);
557 /* return 0 (because the command has been processed) */
558 goto out;
559 }
560
561 /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
562 if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
563 /*
564 * in SDEV_BLOCK, the command is just put back on the device
565 * queue. The suspend state has already blocked the queue so
566 * future requests should not occur until the device
567 * transitions out of the suspend state.
568 */
569 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
570
571 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
572
573 /*
574 * NOTE: rtn is still zero here because we don't need the
575 * queue to be plugged on return (it's already stopped)
576 */
577 goto out;
578 }
579
580 /*
581 * If SCSI-2 or lower, store the LUN value in cmnd.
582 */
583 if (cmd->device->scsi_level <= SCSI_2 &&
584 cmd->device->scsi_level != SCSI_UNKNOWN) {
585 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
586 (cmd->device->lun << 5 & 0xe0);
587 }
588
589 /*
590 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
591 * we can avoid the drive not being ready.
592 */
593 timeout = host->last_reset + MIN_RESET_DELAY;
594
595 if (host->resetting && time_before(jiffies, timeout)) {
596 int ticks_remaining = timeout - jiffies;
597 /*
598 * NOTE: This may be executed from within an interrupt
599 * handler! This is bad, but for now, it'll do. The irq
600 * level of the interrupt handler has been masked out by the
601 * platform dependent interrupt handling code already, so the
602 * sti() here will not cause another call to the SCSI host's
603 * interrupt handler (assuming there is one irq-level per
604 * host).
605 */
606 while (--ticks_remaining >= 0)
607 mdelay(1 + 999 / HZ);
608 host->resetting = 0;
609 }
610
611 /*
612 * AK: unlikely race here: for some reason the timer could
613 * expire before the serial number is set up below.
614 */
615 scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
616
617 scsi_log_send(cmd);
618
619 /*
620 * We will use a queued command if possible, otherwise we will
621 * emulate the queuing and calling of completion function ourselves.
622 */
623 atomic_inc(&cmd->device->iorequest_cnt);
624
625 /*
626 * Before we queue this command, check if the command
627 * length exceeds what the host adapter can handle.
628 */
629 if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
630 SCSI_LOG_MLQUEUE(3,
631 printk("queuecommand : command too long.\n"));
632 cmd->result = (DID_ABORT << 16);
633
634 scsi_done(cmd);
635 goto out;
636 }
637
638 spin_lock_irqsave(host->host_lock, flags);
639 scsi_cmd_get_serial(host, cmd);
640
641 if (unlikely(host->shost_state == SHOST_DEL)) {
642 cmd->result = (DID_NO_CONNECT << 16);
643 scsi_done(cmd);
644 } else {
645 rtn = host->hostt->queuecommand(cmd, scsi_done);
646 }
647 spin_unlock_irqrestore(host->host_lock, flags);
648 if (rtn) {
649 if (scsi_delete_timer(cmd)) {
650 atomic_inc(&cmd->device->iodone_cnt);
651 scsi_queue_insert(cmd,
652 (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
653 rtn : SCSI_MLQUEUE_HOST_BUSY);
654 }
655 SCSI_LOG_MLQUEUE(3,
656 printk("queuecommand : request rejected\n"));
657 }
658
659 out:
660 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
661 return rtn;
662}
663
664/**
665 * scsi_req_abort_cmd -- Request command recovery for the specified command
666 * @cmd: pointer to the SCSI command of interest
667 *
668 * This function requests that SCSI Core start recovery for the
669 * command by deleting the timer and adding the command to the eh
670 * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
671 * implement their own error recovery MAY ignore the timeout event if
672 * they generated scsi_req_abort_cmd.
673 */
674void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
675{
676 if (!scsi_delete_timer(cmd))
677 return;
678 scsi_times_out(cmd);
679}
680EXPORT_SYMBOL(scsi_req_abort_cmd);
681
682/**
683 * scsi_done - Enqueue the finished SCSI command into the done queue.
684 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
685 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
686 *
687 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
688 * which regains ownership of the SCSI command (de facto) from a LLDD, and
689 * enqueues the command to the done queue for further processing.
690 *
691 * This is the producer of the done queue who enqueues at the tail.
692 *
693 * This function is interrupt context safe.
694 */
695static void scsi_done(struct scsi_cmnd *cmd)
696{
697 /*
698 * We don't have to worry about this one timing out anymore.
699 * If we are unable to remove the timer, then the command
700 * has already timed out. In which case, we have no choice but to
701 * let the timeout function run, as we have no idea where in fact
702 * that function could really be. It might be on another processor,
703 * etc, etc.
704 */
705 if (!scsi_delete_timer(cmd))
706 return;
707 __scsi_done(cmd);
708}
709
710/* Private entry to scsi_done() to complete a command when the timer
711 * isn't running --- used by scsi_times_out */
712void __scsi_done(struct scsi_cmnd *cmd)
713{
714 struct request *rq = cmd->request;
715
716 /*
717 * Set the serial numbers back to zero
718 */
719 cmd->serial_number = 0;
720
721 atomic_inc(&cmd->device->iodone_cnt);
722 if (cmd->result)
723 atomic_inc(&cmd->device->ioerr_cnt);
724
725 BUG_ON(!rq);
726
727 /*
728 * The uptodate/nbytes values don't matter, as we allow partial
729 * completes and thus will check this in the softirq callback
730 */
731 rq->completion_data = cmd;
732 blk_complete_request(rq);
733}
734
735/* Move this to a header if it becomes more generally useful */
736static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
737{
738 return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
739}
740
741/**
742 * scsi_finish_command - cleanup and pass command back to upper layer
743 * @cmd: the command
744 *
745 * Description: Pass command off to upper layer for finishing of I/O
746 * request, waking processes that are waiting on results,
747 * etc.
748 */
749void scsi_finish_command(struct scsi_cmnd *cmd)
750{
751 struct scsi_device *sdev = cmd->device;
752 struct Scsi_Host *shost = sdev->host;
753 struct scsi_driver *drv;
754 unsigned int good_bytes;
755
756 scsi_device_unbusy(sdev);
757
758 /*
759 * Clear the flags which say that the device/host is no longer
760 * capable of accepting new commands. These are set in scsi_queue.c
761 * for both the queue full condition on a device, and for a
762 * host full condition on the host.
763 *
764 * XXX(hch): What about locking?
765 */
766 shost->host_blocked = 0;
767 sdev->device_blocked = 0;
768
769 /*
770 * If we have valid sense information, then some kind of recovery
771 * must have taken place. Make a note of this.
772 */
773 if (SCSI_SENSE_VALID(cmd))
774 cmd->result |= (DRIVER_SENSE << 24);
775
776 SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
777 "Notifying upper driver of completion "
778 "(result %x)\n", cmd->result));
779
780 good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len;
781 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
782 drv = scsi_cmd_to_driver(cmd);
783 if (drv->done)
784 good_bytes = drv->done(cmd);
785 }
786 scsi_io_completion(cmd, good_bytes);
787}
788EXPORT_SYMBOL(scsi_finish_command);
789
790/**
791 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
792 * @sdev: SCSI Device in question
793 * @tagged: Do we use tagged queueing (non-0) or do we treat
794 * this device as an untagged device (0)
795 * @tags: Number of tags allowed if tagged queueing enabled,
796 * or number of commands the low level driver can
797 * queue up in non-tagged mode (as per cmd_per_lun).
798 *
799 * Returns: Nothing
800 *
801 * Lock Status: None held on entry
802 *
803 * Notes: Low level drivers may call this at any time and we will do
804 * the right thing depending on whether or not the device is
805 * currently active and whether or not it even has the
806 * command blocks built yet.
807 */
808void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
809{
810 unsigned long flags;
811
812 /*
813 * refuse to set tagged depth to an unworkable size
814 */
815 if (tags <= 0)
816 return;
817
818 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
819
820 /* Check to see if the queue is managed by the block layer.
821 * If it is, and we fail to adjust the depth, exit. */
822 if (blk_queue_tagged(sdev->request_queue) &&
823 blk_queue_resize_tags(sdev->request_queue, tags) != 0)
824 goto out;
825
826 sdev->queue_depth = tags;
827 switch (tagged) {
828 case MSG_ORDERED_TAG:
829 sdev->ordered_tags = 1;
830 sdev->simple_tags = 1;
831 break;
832 case MSG_SIMPLE_TAG:
833 sdev->ordered_tags = 0;
834 sdev->simple_tags = 1;
835 break;
836 default:
837 sdev_printk(KERN_WARNING, sdev,
838 "scsi_adjust_queue_depth, bad queue type, "
839 "disabled\n");
840 case 0:
841 sdev->ordered_tags = sdev->simple_tags = 0;
842 sdev->queue_depth = tags;
843 break;
844 }
845 out:
846 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
847}
848EXPORT_SYMBOL(scsi_adjust_queue_depth);
849
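/*
 * Illustrative sketch: a low-level driver that negotiated tagged queueing
 * would typically call this from its slave_configure() hook; the depth of
 * 64 below is only an example value.
 */
#if 0	/* example only, not compiled */
static int example_slave_configure(struct scsi_device *sdev)
{
	if (sdev->tagged_supported)
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 64);
	else
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	return 0;
}
#endif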
850/**
851 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
852 * @sdev: SCSI Device in question
853 * @depth: Current number of outstanding SCSI commands on this device,
854 * not counting the one returned as QUEUE_FULL.
855 *
856 * Description: This function will track successive QUEUE_FULL events on a
857 * specific SCSI device to determine if and when there is a
858 * need to adjust the queue depth on the device.
859 *
860 * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
861 * -1 - Drop back to untagged operation using host->cmd_per_lun
862 * as the untagged command depth
863 *
864 * Lock Status: None held on entry
865 *
866 * Notes: Low level drivers may call this at any time and we will do
867 * "The Right Thing." We are interrupt context safe.
868 */
869int scsi_track_queue_full(struct scsi_device *sdev, int depth)
870{
871 if ((jiffies >> 4) == sdev->last_queue_full_time)
872 return 0;
873
874 sdev->last_queue_full_time = (jiffies >> 4);
875 if (sdev->last_queue_full_depth != depth) {
876 sdev->last_queue_full_count = 1;
877 sdev->last_queue_full_depth = depth;
878 } else {
879 sdev->last_queue_full_count++;
880 }
881
882 if (sdev->last_queue_full_count <= 10)
883 return 0;
884 if (sdev->last_queue_full_depth < 8) {
885 /* Drop back to untagged */
886 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
887 return -1;
888 }
889
890 if (sdev->ordered_tags)
891 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
892 else
893 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
894 return depth;
895}
896EXPORT_SYMBOL(scsi_track_queue_full);
897
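/*
 * Illustrative sketch: a low-level driver reacting to a QUEUE FULL status in
 * its completion path.  "outstanding" stands for however the driver counts
 * commands currently active on the device.
 */
#if 0	/* example only, not compiled */
static void example_handle_queue_full(struct scsi_device *sdev, int outstanding)
{
	int depth = scsi_track_queue_full(sdev, outstanding);

	if (depth > 0)
		sdev_printk(KERN_INFO, sdev,
			    "queue depth adjusted to %d\n", depth);
	else if (depth == -1)
		sdev_printk(KERN_INFO, sdev, "dropped back to untagged\n");
}
#endif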
898/**
899 * scsi_device_get - get an additional reference to a scsi_device
900 * @sdev: device to get a reference to
901 *
902 * Description: Gets a reference to the scsi_device and increments the use count
903 * of the underlying LLDD module. You must hold host_lock of the
904 * parent Scsi_Host or already have a reference when calling this.
905 */
906int scsi_device_get(struct scsi_device *sdev)
907{
908 if (sdev->sdev_state == SDEV_DEL)
909 return -ENXIO;
910 if (!get_device(&sdev->sdev_gendev))
911 return -ENXIO;
912 /* We can fail this if we're doing SCSI operations
913 * from module exit (like cache flush) */
914 try_module_get(sdev->host->hostt->module);
915
916 return 0;
917}
918EXPORT_SYMBOL(scsi_device_get);
919
920/**
921 * scsi_device_put - release a reference to a scsi_device
922 * @sdev: device to release a reference on.
923 *
924 * Description: Release a reference to the scsi_device and decrements the use
925 * count of the underlying LLDD module. The device is freed once the last
926 * user vanishes.
927 */
928void scsi_device_put(struct scsi_device *sdev)
929{
930#ifdef CONFIG_MODULE_UNLOAD
931 struct module *module = sdev->host->hostt->module;
932
933 /* The module refcount will be zero if scsi_device_get()
934 * was called from a module removal routine */
935 if (module && module_refcount(module) != 0)
936 module_put(module);
937#endif
938 put_device(&sdev->sdev_gendev);
939}
940EXPORT_SYMBOL(scsi_device_put);
941
942/* helper for shost_for_each_device, see that for documentation */
943struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
944 struct scsi_device *prev)
945{
946 struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
947 struct scsi_device *next = NULL;
948 unsigned long flags;
949
950 spin_lock_irqsave(shost->host_lock, flags);
951 while (list->next != &shost->__devices) {
952 next = list_entry(list->next, struct scsi_device, siblings);
953 /* skip devices that we can't get a reference to */
954 if (!scsi_device_get(next))
955 break;
956 next = NULL;
957 list = list->next;
958 }
959 spin_unlock_irqrestore(shost->host_lock, flags);
960
961 if (prev)
962 scsi_device_put(prev);
963 return next;
964}
965EXPORT_SYMBOL(__scsi_iterate_devices);
966
967/**
968 * starget_for_each_device - helper to walk all devices of a target
969 * @starget: target whose devices we want to iterate over.
970 * @data: Opaque passed to each function call.
971 * @fn: Function to call on each device
972 *
973 * This traverses over each device of @starget. The devices have
974 * a reference that must be released by scsi_device_put when breaking
975 * out of the loop.
976 */
977void starget_for_each_device(struct scsi_target *starget, void *data,
978 void (*fn)(struct scsi_device *, void *))
979{
980 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
981 struct scsi_device *sdev;
982
983 shost_for_each_device(sdev, shost) {
984 if ((sdev->channel == starget->channel) &&
985 (sdev->id == starget->id))
986 fn(sdev, data);
987 }
988}
989EXPORT_SYMBOL(starget_for_each_device);
990
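/*
 * Illustrative sketch: walking every device of a target with a callback,
 * here just to print which devices were visited.
 */
#if 0	/* example only, not compiled */
static void example_visit_one(struct scsi_device *sdev, void *data)
{
	sdev_printk(KERN_INFO, sdev, "visited by %s\n", (char *)data);
}

static void example_visit_target(struct scsi_target *starget)
{
	starget_for_each_device(starget, "example", example_visit_one);
}
#endif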
991/**
992 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
993 * @starget: target whose devices we want to iterate over.
994 * @data: parameter for callback @fn()
995 * @fn: callback function that is invoked for each device
996 *
997 * This traverses over each device of @starget. It does _not_
998 * take a reference on the scsi_device, so the whole loop must be
999 * protected by shost->host_lock.
1000 *
1001 * Note: The only reason why drivers would want to use this is because
1002 * they need to access the device list in irq context. Otherwise you
1003 * really want to use starget_for_each_device instead.
1004 **/
1005void __starget_for_each_device(struct scsi_target *starget, void *data,
1006 void (*fn)(struct scsi_device *, void *))
1007{
1008 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1009 struct scsi_device *sdev;
1010
1011 __shost_for_each_device(sdev, shost) {
1012 if ((sdev->channel == starget->channel) &&
1013 (sdev->id == starget->id))
1014 fn(sdev, data);
1015 }
1016}
1017EXPORT_SYMBOL(__starget_for_each_device);
1018
1019/**
1020 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
1021 * @starget: SCSI target pointer
1022 * @lun: SCSI Logical Unit Number
1023 *
1024 * Description: Looks up the scsi_device with the specified @lun for a given
1025 * @starget. The returned scsi_device does not have an additional
1026 * reference. You must hold the host's host_lock over this call and
1027 * any access to the returned scsi_device.
1028 *
1029 * Note: The only reason why drivers should use this is because
1030 * they need to access the device list in irq context. Otherwise you
1031 * really want to use scsi_device_lookup_by_target instead.
1032 **/
1033struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
1034 uint lun)
1035{
1036 struct scsi_device *sdev;
1037
1038 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
1039 if (sdev->lun ==lun)
1040 return sdev;
1041 }
1042
1043 return NULL;
1044}
1045EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1046
1047/**
1048 * scsi_device_lookup_by_target - find a device given the target
1049 * @starget: SCSI target pointer
1050 * @lun: SCSI Logical Unit Number
1051 *
1052 * Description: Looks up the scsi_device with the specified @lun for a given
1053 * @starget. The returned scsi_device has an additional reference that
1054 * needs to be released with scsi_device_put once you're done with it.
1055 **/
1056struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
1057 uint lun)
1058{
1059 struct scsi_device *sdev;
1060 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1061 unsigned long flags;
1062
1063 spin_lock_irqsave(shost->host_lock, flags);
1064 sdev = __scsi_device_lookup_by_target(starget, lun);
1065 if (sdev && scsi_device_get(sdev))
1066 sdev = NULL;
1067 spin_unlock_irqrestore(shost->host_lock, flags);
1068
1069 return sdev;
1070}
1071EXPORT_SYMBOL(scsi_device_lookup_by_target);
1072
1073/**
1074 * __scsi_device_lookup - find a device given the host (UNLOCKED)
1075 * @shost: SCSI host pointer
1076 * @channel: SCSI channel (zero if only one channel)
1077 * @id: SCSI target number (physical unit number)
1078 * @lun: SCSI Logical Unit Number
1079 *
1080 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1081 * for a given host. The returned scsi_device does not have an additional
1082 * reference. You must hold the host's host_lock over this call and any access
1083 * to the returned scsi_device.
1084 *
1085 * Note: The only reason why drivers would want to use this is because
1086 * they need to access the device list in irq context. Otherwise you
1087 * really want to use scsi_device_lookup instead.
1088 **/
1089struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
1090 uint channel, uint id, uint lun)
1091{
1092 struct scsi_device *sdev;
1093
1094 list_for_each_entry(sdev, &shost->__devices, siblings) {
1095 if (sdev->channel == channel && sdev->id == id &&
1096 sdev->lun ==lun)
1097 return sdev;
1098 }
1099
1100 return NULL;
1101}
1102EXPORT_SYMBOL(__scsi_device_lookup);
1103
1104/**
1105 * scsi_device_lookup - find a device given the host
1106 * @shost: SCSI host pointer
1107 * @channel: SCSI channel (zero if only one channel)
1108 * @id: SCSI target number (physical unit number)
1109 * @lun: SCSI Logical Unit Number
1110 *
1111 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1112 * for a given host. The returned scsi_device has an additional reference that
1113 * needs to be released with scsi_device_put once you're done with it.
1114 **/
1115struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
1116 uint channel, uint id, uint lun)
1117{
1118 struct scsi_device *sdev;
1119 unsigned long flags;
1120
1121 spin_lock_irqsave(shost->host_lock, flags);
1122 sdev = __scsi_device_lookup(shost, channel, id, lun);
1123 if (sdev && scsi_device_get(sdev))
1124 sdev = NULL;
1125 spin_unlock_irqrestore(shost->host_lock, flags);
1126
1127 return sdev;
1128}
1129EXPORT_SYMBOL(scsi_device_lookup);
1130
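/*
 * Illustrative sketch: scsi_device_lookup() returns a referenced device, so
 * every successful lookup must be paired with scsi_device_put().
 */
#if 0	/* example only, not compiled */
static void example_poke_device(struct Scsi_Host *shost)
{
	struct scsi_device *sdev = scsi_device_lookup(shost, 0, 0, 0);

	if (!sdev)
		return;
	sdev_printk(KERN_INFO, sdev, "found channel 0, id 0, lun 0\n");
	scsi_device_put(sdev);
}
#endif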
1131MODULE_DESCRIPTION("SCSI core");
1132MODULE_LICENSE("GPL");
1133
1134module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
1135MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
1136
1137static int __init init_scsi(void)
1138{
1139 int error;
1140
1141 error = scsi_init_queue();
1142 if (error)
1143 return error;
1144 error = scsi_init_procfs();
1145 if (error)
1146 goto cleanup_queue;
1147 error = scsi_init_devinfo();
1148 if (error)
1149 goto cleanup_procfs;
1150 error = scsi_init_hosts();
1151 if (error)
1152 goto cleanup_devlist;
1153 error = scsi_init_sysctl();
1154 if (error)
1155 goto cleanup_hosts;
1156 error = scsi_sysfs_register();
1157 if (error)
1158 goto cleanup_sysctl;
1159
1160 scsi_netlink_init();
1161
1162 printk(KERN_NOTICE "SCSI subsystem initialized\n");
1163 return 0;
1164
1165cleanup_sysctl:
1166 scsi_exit_sysctl();
1167cleanup_hosts:
1168 scsi_exit_hosts();
1169cleanup_devlist:
1170 scsi_exit_devinfo();
1171cleanup_procfs:
1172 scsi_exit_procfs();
1173cleanup_queue:
1174 scsi_exit_queue();
1175 printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
1176 -error);
1177 return error;
1178}
1179
1180static void __exit exit_scsi(void)
1181{
1182 scsi_netlink_exit();
1183 scsi_sysfs_unregister();
1184 scsi_exit_sysctl();
1185 scsi_exit_hosts();
1186 scsi_exit_devinfo();
1187 scsi_exit_procfs();
1188 scsi_exit_queue();
1189}
1190
1191subsys_initcall(init_scsi);
1192module_exit(exit_scsi);