drivers/scsi/scsi.c (at commit "[SCSI] use dynamically allocated sense buffer")
1/*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 * Copyright (C) 2002, 2003 Christoph Hellwig
5 *
6 * generic mid-level SCSI driver
7 * Initial versions: Drew Eckhardt
8 * Subsequent revisions: Eric Youngdale
9 *
10 * <drew@colorado.edu>
11 *
12 * Bug correction thanks go to :
13 * Rik Faith <faith@cs.unc.edu>
14 * Tommy Thorn <tthorn>
15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 *
17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
18 * add scatter-gather, multiple outstanding request, and other
19 * enhancements.
20 *
21 * Native multichannel, wide scsi, /proc/scsi and hot plugging
22 * support added by Michael Neuffer <mike@i-connect.net>
23 *
24 * Added request_module("scsi_hostadapter") for kerneld:
25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
26 * Bjorn Ekwall <bj0rn@blox.se>
27 * (changed to kmod)
28 *
29 * Major improvements to the timeout, abort, and reset processing,
30 * as well as performance modifications for large queue depths by
31 * Leonard N. Zubkoff <lnz@dandelion.com>
32 *
33 * Converted cli() code to spinlocks, Ingo Molnar
34 *
35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 *
37 * out_of_space hacks, D. Gilbert (dpg) 990608
38 */
39
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42#include <linux/kernel.h>
43#include <linux/timer.h>
44#include <linux/string.h>
45#include <linux/slab.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h>
48#include <linux/init.h>
49#include <linux/completion.h>
50#include <linux/unistd.h>
51#include <linux/spinlock.h>
52#include <linux/kmod.h>
53#include <linux/interrupt.h>
54#include <linux/notifier.h>
55#include <linux/cpu.h>
56#include <linux/mutex.h>
57
58#include <scsi/scsi.h>
59#include <scsi/scsi_cmnd.h>
60#include <scsi/scsi_dbg.h>
61#include <scsi/scsi_device.h>
62#include <scsi/scsi_driver.h>
63#include <scsi/scsi_eh.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi_tcq.h>
66
67#include "scsi_priv.h"
68#include "scsi_logging.h"
69
70static void scsi_done(struct scsi_cmnd *cmd);
71
72/*
73 * Definitions and constants.
74 */
75
76#define MIN_RESET_DELAY (2*HZ)
77
78/* Do not call reset on error if we just did a reset within 15 sec. */
79#define MIN_RESET_PERIOD (15*HZ)
80
81/*
82 * Macro to determine the size of SCSI command. This macro takes vendor
83 * unique commands into account. SCSI commands in groups 6 and 7 are
84 * vendor unique and we will depend upon the command length being
85 * supplied correctly in cmd_len.
86 */
87#define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
88 COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
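/*
 * Editor's note (not part of the original file): the opcode's top three
 * bits select the SCSI command group.  For example, READ(10) is opcode
 * 0x28, so ((0x28 >> 5) & 7) == 1 and COMMAND_SIZE() supplies the fixed
 * 10-byte CDB length, while a vendor-specific opcode such as 0xC1 falls
 * into group 6 and the length stored in cmd->cmd_len is trusted instead.
 */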
89
90/*
91 * Note - the initial logging level can be set here to log events at boot time.
92 * After the system is up, you may enable logging via the /proc interface.
93 */
94unsigned int scsi_logging_level;
95#if defined(CONFIG_SCSI_LOGGING)
96EXPORT_SYMBOL(scsi_logging_level);
97#endif
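/*
 * Editor's note (not part of the original file): scsi_logging_level is a
 * bit mask built from the SCSI_LOG_*_SHIFT/SCSI_LOG_*_BITS definitions in
 * scsi_logging.h.  On a kernel built with CONFIG_SCSI_LOGGING it can
 * usually be changed at run time through the module parameter, e.g.
 *
 *	echo <bitmask> > /sys/module/scsi_mod/parameters/scsi_logging_level
 *
 * or through the dev.scsi.logging_level sysctl.
 */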
98
99/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
100 * You may not alter any existing entry (although adding new ones is
101 * encouraged once assigned by ANSI/INCITS T10).
102 */
103static const char *const scsi_device_types[] = {
104 "Direct-Access ",
105 "Sequential-Access",
106 "Printer ",
107 "Processor ",
108 "WORM ",
8a1cdc9c 109 "CD-ROM ",
1da177e4 110 "Scanner ",
111 "Optical Device ",
112 "Medium Changer ",
1da177e4 113 "Communications ",
114 "ASC IT8 ",
115 "ASC IT8 ",
116 "RAID ",
117 "Enclosure ",
8a1cdc9c 118 "Direct-Access-RBC",
119 "Optical card ",
120 "Bridge controller",
121 "Object storage ",
122 "Automation/Drive ",
123};
124
125/**
126 * scsi_device_type - Return 17 char string indicating device type.
127 * @type: type number to look up
128 */
129
130const char * scsi_device_type(unsigned type)
131{
132 if (type == 0x1e)
133 return "Well-known LUN ";
134 if (type == 0x1f)
135 return "No Device ";
136 if (type >= ARRAY_SIZE(scsi_device_types))
137 return "Unknown ";
138 return scsi_device_types[type];
139}
140
141EXPORT_SYMBOL(scsi_device_type);
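/*
 * Illustrative use (editor's addition, not part of the original file):
 * callers typically hand the INQUIRY peripheral device type straight to
 * this helper, e.g.
 *
 *	sdev_printk(KERN_INFO, sdev, "type %s\n",
 *		    scsi_device_type(sdev->type));
 */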
142
143struct scsi_host_cmd_pool {
144 struct kmem_cache *slab;
145 unsigned int users;
146 char *name;
147 unsigned int slab_flags;
148 gfp_t gfp_mask;
149};
150
151static struct scsi_host_cmd_pool scsi_cmd_pool = {
152 .name = "scsi_cmd_cache",
153 .slab_flags = SLAB_HWCACHE_ALIGN,
154};
155
156static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
157 .name = "scsi_cmd_cache(DMA)",
158 .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
159 .gfp_mask = __GFP_DMA,
160};
161
162static DEFINE_MUTEX(host_cmd_pool_mutex);
163
164static struct kmem_cache *sense_buffer_slab;
165static int sense_buffer_slab_users;
166
167/**
168 * __scsi_get_command - Allocate a struct scsi_cmnd
169 * @shost: host to transmit command
170 * @gfp_mask: allocation mask
171 *
172 * Description: allocate a struct scsi_cmnd from the host's slab, recycling from the
173 * host's free_list if necessary.
174 */
175struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
176{
177 struct scsi_cmnd *cmd;
178 unsigned char *buf;
179
180 cmd = kmem_cache_alloc(shost->cmd_pool->slab,
181 gfp_mask | shost->cmd_pool->gfp_mask);
182
183 if (unlikely(!cmd)) {
184 unsigned long flags;
185
186 spin_lock_irqsave(&shost->free_list_lock, flags);
187 if (likely(!list_empty(&shost->free_list))) {
188 cmd = list_entry(shost->free_list.next,
189 struct scsi_cmnd, list);
190 list_del_init(&cmd->list);
191 }
192 spin_unlock_irqrestore(&shost->free_list_lock, flags);
193
194 if (cmd) {
195 buf = cmd->sense_buffer;
196 memset(cmd, 0, sizeof(*cmd));
197 cmd->sense_buffer = buf;
198 }
199 } else {
200 buf = kmem_cache_alloc(sense_buffer_slab, __GFP_DMA|gfp_mask);
201 if (likely(buf)) {
202 memset(cmd, 0, sizeof(*cmd));
203 cmd->sense_buffer = buf;
204 } else {
205 kmem_cache_free(shost->cmd_pool->slab, cmd);
206 cmd = NULL;
207 }
208 }
209
210 return cmd;
211}
212EXPORT_SYMBOL_GPL(__scsi_get_command);
213
214/**
215 * scsi_get_command - Allocate and setup a scsi command block
216 * @dev: parent scsi device
217 * @gfp_mask: allocator flags
218 *
219 * Returns: The allocated scsi command structure.
220 */
221struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
222{
223 struct scsi_cmnd *cmd;
224
225 /* Bail if we can't get a reference to the device */
226 if (!get_device(&dev->sdev_gendev))
227 return NULL;
228
229 cmd = __scsi_get_command(dev->host, gfp_mask);
230
231 if (likely(cmd != NULL)) {
232 unsigned long flags;
233
234 cmd->device = dev;
235 init_timer(&cmd->eh_timeout);
236 INIT_LIST_HEAD(&cmd->list);
237 spin_lock_irqsave(&dev->list_lock, flags);
238 list_add_tail(&cmd->list, &dev->cmd_list);
239 spin_unlock_irqrestore(&dev->list_lock, flags);
240 cmd->jiffies_at_alloc = jiffies;
241 } else
242 put_device(&dev->sdev_gendev);
243
244 return cmd;
245}
246EXPORT_SYMBOL(scsi_get_command);
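/*
 * Illustrative pairing (editor's addition, not part of the original file):
 * a command obtained here is normally handed back with scsi_put_command()
 * once the caller is done with it, e.g.
 *
 *	struct scsi_cmnd *cmd = scsi_get_command(sdev, GFP_KERNEL);
 *
 *	if (cmd) {
 *		... build and issue the command ...
 *		scsi_put_command(cmd);
 *	}
 */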
247
248/**
249 * __scsi_put_command - Free a struct scsi_cmnd
250 * @shost: dev->host
251 * @cmd: Command to free
252 * @dev: parent scsi device
253 */
254void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
255 struct device *dev)
256{
257 unsigned long flags;
258
259 /* changing locks here, don't need to restore the irq state */
260 spin_lock_irqsave(&shost->free_list_lock, flags);
261 if (unlikely(list_empty(&shost->free_list))) {
262 list_add(&cmd->list, &shost->free_list);
263 cmd = NULL;
264 }
265 spin_unlock_irqrestore(&shost->free_list_lock, flags);
266
267 if (likely(cmd != NULL)) {
268 kmem_cache_free(sense_buffer_slab, cmd->sense_buffer);
269 kmem_cache_free(shost->cmd_pool->slab, cmd);
270 }
271
272 put_device(dev);
273}
274EXPORT_SYMBOL(__scsi_put_command);
275
276/**
277 * scsi_put_command - Free a scsi command block
278 * @cmd: command block to free
279 *
280 * Returns: Nothing.
281 *
282 * Notes: The command must not belong to any lists.
283 */
284void scsi_put_command(struct scsi_cmnd *cmd)
285{
286 struct scsi_device *sdev = cmd->device;
287 unsigned long flags;
288
289 /* serious error if the command hasn't come from a device list */
290 spin_lock_irqsave(&cmd->device->list_lock, flags);
291 BUG_ON(list_empty(&cmd->list));
292 list_del_init(&cmd->list);
293 spin_unlock_irqrestore(&cmd->device->list_lock, flags);
294
295 __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
296}
297EXPORT_SYMBOL(scsi_put_command);
298
299/**
300 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
301 * @shost: host to allocate the freelist for.
302 *
303 * Description: The command freelist protects against system-wide out of memory
304 * deadlock by preallocating one SCSI command structure for each host, so the
305 * system can always write to a swap file on a device associated with that host.
306 *
307 * Returns: Nothing.
308 */
309int scsi_setup_command_freelist(struct Scsi_Host *shost)
310{
311 struct scsi_host_cmd_pool *pool;
312 struct scsi_cmnd *cmd;
313 unsigned char *sense_buffer;
314
315 spin_lock_init(&shost->free_list_lock);
316 INIT_LIST_HEAD(&shost->free_list);
317
318 /*
319 * Select a command slab for this host and create it if it
320 * doesn't exist yet.
321 */
322 mutex_lock(&host_cmd_pool_mutex);
323 pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
324 if (!pool->users) {
325 pool->slab = kmem_cache_create(pool->name,
326 sizeof(struct scsi_cmnd), 0,
327 pool->slab_flags, NULL);
328 if (!pool->slab)
329 goto fail;
330 }
331
332 pool->users++;
333 shost->cmd_pool = pool;
334 mutex_unlock(&host_cmd_pool_mutex);
335
336 /*
337 * Get one backup command for this host.
338 */
339 cmd = kmem_cache_alloc(shost->cmd_pool->slab,
340 GFP_KERNEL | shost->cmd_pool->gfp_mask);
341 if (!cmd)
342 goto fail2;
343
344 sense_buffer = kmem_cache_alloc(sense_buffer_slab,
345 GFP_KERNEL | __GFP_DMA);
346 if (!sense_buffer)
347 goto destroy_backup;
348
349 cmd->sense_buffer = sense_buffer;
350 list_add(&cmd->list, &shost->free_list);
351 return 0;
352
353destroy_backup:
354 kmem_cache_free(shost->cmd_pool->slab, cmd);
355 fail2:
356 mutex_lock(&host_cmd_pool_mutex);
357 if (!--pool->users)
358 kmem_cache_destroy(pool->slab);
359 fail:
360 mutex_unlock(&host_cmd_pool_mutex);
361 return -ENOMEM;
362}
363
364/**
365 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
366 * @shost: host whose freelist is going to be destroyed
367 */
368void scsi_destroy_command_freelist(struct Scsi_Host *shost)
369{
370 while (!list_empty(&shost->free_list)) {
371 struct scsi_cmnd *cmd;
372
373 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
374 list_del_init(&cmd->list);
375 kmem_cache_free(sense_buffer_slab, cmd->sense_buffer);
376 kmem_cache_free(shost->cmd_pool->slab, cmd);
377 }
378
379 mutex_lock(&host_cmd_pool_mutex);
380 if (!--shost->cmd_pool->users)
381 kmem_cache_destroy(shost->cmd_pool->slab);
382 mutex_unlock(&host_cmd_pool_mutex);
383}
384
385int scsi_setup_command_sense_buffer(struct Scsi_Host *shost)
386{
387 mutex_lock(&host_cmd_pool_mutex);
388 if (!sense_buffer_slab_users) {
389 sense_buffer_slab = kmem_cache_create("scsi_sense_buffer",
390 SCSI_SENSE_BUFFERSIZE,
391 0, SLAB_CACHE_DMA, NULL);
392 if (!sense_buffer_slab) {
393 mutex_unlock(&host_cmd_pool_mutex);
394 return -ENOMEM;
395 }
396 }
397 sense_buffer_slab_users++;
398 mutex_unlock(&host_cmd_pool_mutex);
399
400 return 0;
401}
402
403void scsi_destroy_command_sense_buffer(struct Scsi_Host *shost)
404{
405 mutex_lock(&host_cmd_pool_mutex);
406 if (!--sense_buffer_slab_users)
407 kmem_cache_destroy(sense_buffer_slab);
408 mutex_unlock(&host_cmd_pool_mutex);
409}
410
411#ifdef CONFIG_SCSI_LOGGING
412void scsi_log_send(struct scsi_cmnd *cmd)
413{
414 unsigned int level;
415
416 /*
417 * If ML QUEUE log level is greater than or equal to:
418 *
419 * 1: nothing (match completion)
420 *
421 * 2: log opcode + command of all commands
422 *
423 * 3: same as 2 plus dump cmd address
424 *
425 * 4: same as 3 plus dump extra junk
426 */
427 if (unlikely(scsi_logging_level)) {
428 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
429 SCSI_LOG_MLQUEUE_BITS);
430 if (level > 1) {
431 scmd_printk(KERN_INFO, cmd, "Send: ");
432 if (level > 2)
433 printk("0x%p ", cmd);
434 printk("\n");
435 scsi_print_command(cmd);
436 if (level > 3) {
437 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
7b3d9545 438 " queuecommand 0x%p\n",
a73e45b3 439 scsi_sglist(cmd), scsi_bufflen(cmd),
a4d04a4c 440 cmd->device->host->hostt->queuecommand);
441
442 }
443 }
444 }
445}
446
447void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
448{
449 unsigned int level;
450
451 /*
452 * If ML COMPLETE log level is greater than or equal to:
453 *
454 * 1: log disposition, result, opcode + command, and conditionally
455 * sense data for failures or non SUCCESS dispositions.
456 *
457 * 2: same as 1 but for all command completions.
458 *
459 * 3: same as 2 plus dump cmd address
460 *
461 * 4: same as 3 plus dump extra junk
462 */
463 if (unlikely(scsi_logging_level)) {
464 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
465 SCSI_LOG_MLCOMPLETE_BITS);
466 if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
467 (level > 1)) {
468 scmd_printk(KERN_INFO, cmd, "Done: ");
469 if (level > 2)
470 printk("0x%p ", cmd);
471 /*
472 * Dump truncated values, so we usually fit within
473 * 80 chars.
474 */
475 switch (disposition) {
476 case SUCCESS:
477 printk("SUCCESS\n");
478 break;
479 case NEEDS_RETRY:
480 printk("RETRY\n");
481 break;
482 case ADD_TO_MLQUEUE:
483 printk("MLQUEUE\n");
484 break;
485 case FAILED:
486 printk("FAILED\n");
487 break;
488 case TIMEOUT_ERROR:
489 /*
490 * If called via scsi_times_out.
491 */
492 printk("TIMEOUT\n");
493 break;
494 default:
495 printk("UNKNOWN\n");
496 }
497 scsi_print_result(cmd);
498 scsi_print_command(cmd);
499 if (status_byte(cmd->result) & CHECK_CONDITION)
500 scsi_print_sense("", cmd);
501 if (level > 3)
502 scmd_printk(KERN_INFO, cmd,
503 "scsi host busy %d failed %d\n",
504 cmd->device->host->host_busy,
505 cmd->device->host->host_failed);
506 }
507 }
508}
509#endif
510
511/**
512 * scsi_cmd_get_serial - Assign a serial number to a command
513 * @host: the scsi host
514 * @cmd: command to assign serial number to
515 *
516 * Description: a serial number identifies a request for error recovery
517 * and debugging purposes. Protected by the Host_Lock of host.
518 */
519static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
520{
521 cmd->serial_number = host->cmd_serial_number++;
522 if (cmd->serial_number == 0)
523 cmd->serial_number = host->cmd_serial_number++;
524}
525
526/**
527 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
528 * @cmd: command block we are dispatching.
529 *
530 * Return: nonzero means the request was rejected and the device's queue
531 * needs to be plugged.
532 */
533int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
534{
535 struct Scsi_Host *host = cmd->device->host;
536 unsigned long flags = 0;
537 unsigned long timeout;
538 int rtn = 0;
539
540 /* check if the device is still usable */
541 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
542 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
543 * returns an immediate error upwards, and signals
544 * that the device is no longer present */
545 cmd->result = DID_NO_CONNECT << 16;
546 atomic_inc(&cmd->device->iorequest_cnt);
547 __scsi_done(cmd);
548 /* return 0 (because the command has been processed) */
549 goto out;
550 }
551
552 /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
553 if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
554 /*
555 * in SDEV_BLOCK, the command is just put back on the device
556 * queue. The suspend state has already blocked the queue so
557 * future requests should not occur until the device
558 * transitions out of the suspend state.
559 */
560 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
561
562 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
563
564 /*
565 * NOTE: rtn is still zero here because we don't need the
566 * queue to be plugged on return (it's already stopped)
567 */
568 goto out;
569 }
570
571 /*
572 * If SCSI-2 or lower, store the LUN value in cmnd.
573 */
574 if (cmd->device->scsi_level <= SCSI_2 &&
575 cmd->device->scsi_level != SCSI_UNKNOWN) {
576 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
577 (cmd->device->lun << 5 & 0xe0);
578 }
579
580 /*
581 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
582 * we can avoid the drive not being ready.
583 */
584 timeout = host->last_reset + MIN_RESET_DELAY;
585
586 if (host->resetting && time_before(jiffies, timeout)) {
587 int ticks_remaining = timeout - jiffies;
588 /*
589 * NOTE: This may be executed from within an interrupt
590 * handler! This is bad, but for now, it'll do. The irq
591 * level of the interrupt handler has been masked out by the
592 * platform dependent interrupt handling code already, so the
593 * sti() here will not cause another call to the SCSI host's
594 * interrupt handler (assuming there is one irq-level per
595 * host).
596 */
597 while (--ticks_remaining >= 0)
598 mdelay(1 + 999 / HZ);
599 host->resetting = 0;
600 }
601
602 /*
603 * AK: unlikely race here: for some reason the timer could
604 * expire before the serial number is set up below.
605 */
606 scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
607
608 scsi_log_send(cmd);
609
610 /*
611 * We will use a queued command if possible, otherwise we will
612 * emulate the queuing and calling of completion function ourselves.
613 */
614 atomic_inc(&cmd->device->iorequest_cnt);
615
616 /*
617 * Before we queue this command, check if the command
618 * length exceeds what the host adapter can handle.
619 */
620 if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
621 SCSI_LOG_MLQUEUE(3,
622 printk("queuecommand : command too long.\n"));
623 cmd->result = (DID_ABORT << 16);
624
625 scsi_done(cmd);
626 goto out;
627 }
628
629 spin_lock_irqsave(host->host_lock, flags);
630 scsi_cmd_get_serial(host, cmd);
631
632 if (unlikely(host->shost_state == SHOST_DEL)) {
633 cmd->result = (DID_NO_CONNECT << 16);
634 scsi_done(cmd);
635 } else {
636 rtn = host->hostt->queuecommand(cmd, scsi_done);
637 }
638 spin_unlock_irqrestore(host->host_lock, flags);
639 if (rtn) {
640 if (scsi_delete_timer(cmd)) {
641 atomic_inc(&cmd->device->iodone_cnt);
642 scsi_queue_insert(cmd,
643 (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
644 rtn : SCSI_MLQUEUE_HOST_BUSY);
645 }
646 SCSI_LOG_MLQUEUE(3,
647 printk("queuecommand : request rejected\n"));
648 }
649
650 out:
651 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
652 return rtn;
653}
654
655/**
656 * scsi_req_abort_cmd -- Request command recovery for the specified command
657 * @cmd: pointer to the SCSI command of interest
658 *
659 * This function requests that SCSI Core start recovery for the
660 * command by deleting the timer and adding the command to the eh
661 * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
662 * implement their own error recovery MAY ignore the timeout event if
663 * they generated scsi_req_abort_cmd.
664 */
665void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
666{
667 if (!scsi_delete_timer(cmd))
668 return;
669 scsi_times_out(cmd);
670}
671EXPORT_SYMBOL(scsi_req_abort_cmd);
672
673/**
674 * scsi_done - Enqueue the finished SCSI command into the done queue.
675 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
676 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
677 *
678 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
679 * which regains ownership of the SCSI command (de facto) from a LLDD, and
680 * enqueues the command to the done queue for further processing.
681 *
682 * This is the producer of the done queue who enqueues at the tail.
683 *
684 * This function is interrupt context safe.
685 */
686static void scsi_done(struct scsi_cmnd *cmd)
687{
688 /*
689 * We don't have to worry about this one timing out anymore.
690 * If we are unable to remove the timer, then the command
691 * has already timed out. In which case, we have no choice but to
692 * let the timeout function run, as we have no idea where in fact
693 * that function could really be. It might be on another processor,
694 * etc, etc.
695 */
696 if (!scsi_delete_timer(cmd))
697 return;
698 __scsi_done(cmd);
699}
700
701/* Private entry to scsi_done() to complete a command when the timer
702 * isn't running --- used by scsi_times_out */
703void __scsi_done(struct scsi_cmnd *cmd)
704{
705 struct request *rq = cmd->request;
706
707 /*
708 * Set the serial numbers back to zero
709 */
710 cmd->serial_number = 0;
711
712 atomic_inc(&cmd->device->iodone_cnt);
713 if (cmd->result)
714 atomic_inc(&cmd->device->ioerr_cnt);
715
716 BUG_ON(!rq);
717
718 /*
719 * The uptodate/nbytes values don't matter, as we allow partial
720 * completes and thus will check this in the softirq callback
721 */
722 rq->completion_data = cmd;
723 blk_complete_request(rq);
724}
725
726/* Move this to a header if it becomes more generally useful */
727static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
728{
729 return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
730}
731
732/**
733 * scsi_finish_command - cleanup and pass command back to upper layer
734 * @cmd: the command
735 *
736 * Description: Pass command off to upper layer for finishing of I/O
737 * request, waking processes that are waiting on results,
738 * etc.
739 */
740void scsi_finish_command(struct scsi_cmnd *cmd)
741{
742 struct scsi_device *sdev = cmd->device;
743 struct Scsi_Host *shost = sdev->host;
744 struct scsi_driver *drv;
745 unsigned int good_bytes;
746
747 scsi_device_unbusy(sdev);
748
749 /*
750 * Clear the flags which say that the device/host is no longer
751 * capable of accepting new commands. These are set in scsi_queue.c
752 * for both the queue full condition on a device, and for a
753 * host full condition on the host.
754 *
755 * XXX(hch): What about locking?
756 */
757 shost->host_blocked = 0;
758 sdev->device_blocked = 0;
759
760 /*
761 * If we have valid sense information, then some kind of recovery
762 * must have taken place. Make a note of this.
763 */
764 if (SCSI_SENSE_VALID(cmd))
765 cmd->result |= (DRIVER_SENSE << 24);
766
767 SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
768 "Notifying upper driver of completion "
769 "(result %x)\n", cmd->result));
770
771 good_bytes = cmd->request_bufflen;
772 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
773 drv = scsi_cmd_to_driver(cmd);
774 if (drv->done)
775 good_bytes = drv->done(cmd);
776 }
777 scsi_io_completion(cmd, good_bytes);
778}
779EXPORT_SYMBOL(scsi_finish_command);
780
781/**
782 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
783 * @sdev: SCSI Device in question
784 * @tagged: Do we use tagged queueing (non-0) or do we treat
785 * this device as an untagged device (0)
786 * @tags: Number of tags allowed if tagged queueing enabled,
787 * or number of commands the low level driver can
788 * queue up in non-tagged mode (as per cmd_per_lun).
789 *
790 * Returns: Nothing
791 *
792 * Lock Status: None held on entry
793 *
794 * Notes: Low level drivers may call this at any time and we will do
795 * the right thing depending on whether or not the device is
796 * currently active and whether or not it even has the
797 * command blocks built yet.
798 */
799void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
800{
801 unsigned long flags;
802
803 /*
804 * refuse to set tagged depth to an unworkable size
805 */
806 if (tags <= 0)
807 return;
808
809 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
810
811 /* Check to see if the queue is managed by the block layer.
812 * If it is, and we fail to adjust the depth, exit. */
813 if (blk_queue_tagged(sdev->request_queue) &&
814 blk_queue_resize_tags(sdev->request_queue, tags) != 0)
815 goto out;
816
817 sdev->queue_depth = tags;
818 switch (tagged) {
819 case MSG_ORDERED_TAG:
820 sdev->ordered_tags = 1;
821 sdev->simple_tags = 1;
822 break;
823 case MSG_SIMPLE_TAG:
824 sdev->ordered_tags = 0;
825 sdev->simple_tags = 1;
826 break;
827 default:
828 sdev_printk(KERN_WARNING, sdev,
829 "scsi_adjust_queue_depth, bad queue type, "
830 "disabled\n");
831 case 0:
832 sdev->ordered_tags = sdev->simple_tags = 0;
833 sdev->queue_depth = tags;
834 break;
835 }
836 out:
837 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
838}
839EXPORT_SYMBOL(scsi_adjust_queue_depth);
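/*
 * Illustrative use (editor's addition, not part of the original file):
 * low-level drivers typically call this from slave_configure() or from a
 * change_queue_depth() handler, e.g.
 *
 *	if (sdev->tagged_supported)
 *		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 64);
 *	else
 *		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
 *
 * where 64 is just an example depth chosen by the driver.
 */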
840
841/**
842 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
843 * @sdev: SCSI Device in question
844 * @depth: Current number of outstanding SCSI commands on this device,
845 * not counting the one returned as QUEUE_FULL.
846 *
847 * Description: This function will track successive QUEUE_FULL events on a
848 * specific SCSI device to determine if and when there is a
849 * need to adjust the queue depth on the device.
850 *
851 * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
852 * -1 - Drop back to untagged operation using host->cmd_per_lun
853 * as the untagged command depth
854 *
855 * Lock Status: None held on entry
856 *
857 * Notes: Low level drivers may call this at any time and we will do
858 * "The Right Thing." We are interrupt context safe.
859 */
860int scsi_track_queue_full(struct scsi_device *sdev, int depth)
861{
862 if ((jiffies >> 4) == sdev->last_queue_full_time)
863 return 0;
864
865 sdev->last_queue_full_time = (jiffies >> 4);
866 if (sdev->last_queue_full_depth != depth) {
867 sdev->last_queue_full_count = 1;
868 sdev->last_queue_full_depth = depth;
869 } else {
870 sdev->last_queue_full_count++;
871 }
872
873 if (sdev->last_queue_full_count <= 10)
874 return 0;
875 if (sdev->last_queue_full_depth < 8) {
876 /* Drop back to untagged */
877 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
878 return -1;
879 }
880
881 if (sdev->ordered_tags)
882 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
883 else
884 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
885 return depth;
886}
887EXPORT_SYMBOL(scsi_track_queue_full);
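/*
 * Illustrative use (editor's addition, not part of the original file):
 * a low-level driver that sees a QUEUE_FULL (SAM_STAT_TASK_SET_FULL)
 * status in its completion path might report the outcome like this,
 * where "outstanding" is the driver's own count of commands still in
 * flight on the device:
 *
 *	depth = scsi_track_queue_full(sdev, outstanding - 1);
 *	if (depth > 0)
 *		sdev_printk(KERN_INFO, sdev,
 *			    "queue depth adjusted to %d\n", depth);
 */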
888
889/**
890 * scsi_device_get - get an additional reference to a scsi_device
891 * @sdev: device to get a reference to
892 *
893 * Description: Gets a reference to the scsi_device and increments the use count
894 * of the underlying LLDD module. You must hold host_lock of the
895 * parent Scsi_Host or already have a reference when calling this.
896 */
897int scsi_device_get(struct scsi_device *sdev)
898{
899 if (sdev->sdev_state == SDEV_DEL)
900 return -ENXIO;
901 if (!get_device(&sdev->sdev_gendev))
902 return -ENXIO;
903 /* We can fail this if we're doing SCSI operations
904 * from module exit (like cache flush) */
905 try_module_get(sdev->host->hostt->module);
906
907 return 0;
908}
909EXPORT_SYMBOL(scsi_device_get);
910
911/**
912 * scsi_device_put - release a reference to a scsi_device
913 * @sdev: device to release a reference on.
914 *
915 * Description: Release a reference to the scsi_device and decrements the use
916 * count of the underlying LLDD module. The device is freed once the last
917 * user vanishes.
918 */
919void scsi_device_put(struct scsi_device *sdev)
920{
921#ifdef CONFIG_MODULE_UNLOAD
922 struct module *module = sdev->host->hostt->module;
923
924 /* The module refcount will be zero if scsi_device_get()
925 * was called from a module removal routine */
926 if (module && module_refcount(module) != 0)
927 module_put(module);
928#endif
929 put_device(&sdev->sdev_gendev);
930}
931EXPORT_SYMBOL(scsi_device_put);
932
933/* helper for shost_for_each_device, see that for documentation */
934struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
935 struct scsi_device *prev)
936{
937 struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
938 struct scsi_device *next = NULL;
939 unsigned long flags;
940
941 spin_lock_irqsave(shost->host_lock, flags);
942 while (list->next != &shost->__devices) {
943 next = list_entry(list->next, struct scsi_device, siblings);
944 /* skip devices that we can't get a reference to */
945 if (!scsi_device_get(next))
946 break;
947 next = NULL;
948 list = list->next;
949 }
950 spin_unlock_irqrestore(shost->host_lock, flags);
951
952 if (prev)
953 scsi_device_put(prev);
954 return next;
955}
956EXPORT_SYMBOL(__scsi_iterate_devices);
957
958/**
959 * starget_for_each_device - helper to walk all devices of a target
960 * @starget: target whose devices we want to iterate over.
961 * @data: Opaque passed to each function call.
962 * @fn: Function to call on each device
963 *
964 * This traverses over each device of @starget. The devices have
965 * a reference that must be released by scsi_device_put when breaking
966 * out of the loop.
967 */
968void starget_for_each_device(struct scsi_target *starget, void *data,
969 void (*fn)(struct scsi_device *, void *))
970{
971 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
972 struct scsi_device *sdev;
973
974 shost_for_each_device(sdev, shost) {
975 if ((sdev->channel == starget->channel) &&
976 (sdev->id == starget->id))
977 fn(sdev, data);
978 }
979}
980EXPORT_SYMBOL(starget_for_each_device);
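/*
 * Illustrative use (editor's addition, not part of the original file);
 * the callback name below is made up:
 *
 *	static void my_count_device(struct scsi_device *sdev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *	starget_for_each_device(starget, &count, my_count_device);
 */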
981
982/**
983 * __starget_for_each_device - helper to walk all devices of a target
984 * (UNLOCKED)
985 * @starget: target whose devices we want to iterate over.
986 *
987 * This traverses over each device of @starget. It does _not_
988 * take a reference on the scsi_device, so the whole loop must be
989 * protected by shost->host_lock.
990 *
991 * Note: The only reason why drivers would want to use this is because
992 * they need to access the device list in irq context. Otherwise you
993 * really want to use starget_for_each_device instead.
994 **/
995void __starget_for_each_device(struct scsi_target *starget, void *data,
996 void (*fn)(struct scsi_device *, void *))
997{
998 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
999 struct scsi_device *sdev;
1000
1001 __shost_for_each_device(sdev, shost) {
1002 if ((sdev->channel == starget->channel) &&
1003 (sdev->id == starget->id))
1004 fn(sdev, data);
1005 }
1006}
1007EXPORT_SYMBOL(__starget_for_each_device);
1008
1009/**
1010 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
1011 * @starget: SCSI target pointer
1012 * @lun: SCSI Logical Unit Number
1013 *
1014 * Description: Looks up the scsi_device with the specified @lun for a given
1015 * @starget. The returned scsi_device does not have an additional
1016 * reference. You must hold the host's host_lock over this call and
1017 * any access to the returned scsi_device.
1018 *
1019 * Note: The only reason why drivers should use this is because
1020 * they need to access the device list in irq context. Otherwise you
1021 * really want to use scsi_device_lookup_by_target instead.
1022 **/
1023struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
1024 uint lun)
1025{
1026 struct scsi_device *sdev;
1027
1028 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
1029 if (sdev->lun ==lun)
1030 return sdev;
1031 }
1032
1033 return NULL;
1034}
1035EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1036
1037/**
1038 * scsi_device_lookup_by_target - find a device given the target
1039 * @starget: SCSI target pointer
1040 * @lun: SCSI Logical Unit Number
1041 *
1042 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1043 * for a given host. The returned scsi_device has an additional reference that
1044 * needs to be released with scsi_device_put once you're done with it.
1045 **/
1046struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
1047 uint lun)
1048{
1049 struct scsi_device *sdev;
1050 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1051 unsigned long flags;
1052
1053 spin_lock_irqsave(shost->host_lock, flags);
1054 sdev = __scsi_device_lookup_by_target(starget, lun);
1055 if (sdev && scsi_device_get(sdev))
1056 sdev = NULL;
1057 spin_unlock_irqrestore(shost->host_lock, flags);
1058
1059 return sdev;
1060}
1061EXPORT_SYMBOL(scsi_device_lookup_by_target);
1062
1063/**
1064 * __scsi_device_lookup - find a device given the host (UNLOCKED)
1065 * @shost: SCSI host pointer
1066 * @channel: SCSI channel (zero if only one channel)
1067 * @id: SCSI target number (physical unit number)
1068 * @lun: SCSI Logical Unit Number
1069 *
1070 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1071 * for a given host. The returned scsi_device does not have an additional
1072 * reference. You must hold the host's host_lock over this call and any access
1073 * to the returned scsi_device.
1074 *
1075 * Note: The only reason why drivers would want to use this is because
1076 * they need to access the device list in irq context. Otherwise you
1077 * really want to use scsi_device_lookup instead.
1078 **/
1079struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
1080 uint channel, uint id, uint lun)
1081{
1082 struct scsi_device *sdev;
1083
1084 list_for_each_entry(sdev, &shost->__devices, siblings) {
1085 if (sdev->channel == channel && sdev->id == id &&
1086 sdev->lun ==lun)
1087 return sdev;
1088 }
1089
1090 return NULL;
1091}
1092EXPORT_SYMBOL(__scsi_device_lookup);
1093
1094/**
1095 * scsi_device_lookup - find a device given the host
1096 * @shost: SCSI host pointer
1097 * @channel: SCSI channel (zero if only one channel)
1098 * @id: SCSI target number (physical unit number)
1099 * @lun: SCSI Logical Unit Number
1100 *
1101 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1102 * for a given host. The returned scsi_device has an additional reference that
1103 * needs to be released with scsi_device_put once you're done with it.
1104 **/
1105struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
1106 uint channel, uint id, uint lun)
1107{
1108 struct scsi_device *sdev;
1109 unsigned long flags;
1110
1111 spin_lock_irqsave(shost->host_lock, flags);
1112 sdev = __scsi_device_lookup(shost, channel, id, lun);
1113 if (sdev && scsi_device_get(sdev))
1114 sdev = NULL;
1115 spin_unlock_irqrestore(shost->host_lock, flags);
1116
1117 return sdev;
1118}
1119EXPORT_SYMBOL(scsi_device_lookup);
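/*
 * Illustrative use (editor's addition, not part of the original file);
 * the channel/id/lun values are arbitrary:
 *
 *	struct scsi_device *sdev = scsi_device_lookup(shost, 0, 2, 0);
 *
 *	if (sdev) {
 *		... use the device ...
 *		scsi_device_put(sdev);
 *	}
 *
 * The extra reference taken by the lookup must always be dropped with
 * scsi_device_put().
 */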
1120
1121MODULE_DESCRIPTION("SCSI core");
1122MODULE_LICENSE("GPL");
1123
1124module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
1125MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
1126
1127static int __init init_scsi(void)
1128{
1129 int error;
1130
1131 error = scsi_init_queue();
1132 if (error)
1133 return error;
1134 error = scsi_init_procfs();
1135 if (error)
1136 goto cleanup_queue;
1137 error = scsi_init_devinfo();
1138 if (error)
1139 goto cleanup_procfs;
1140 error = scsi_init_hosts();
1141 if (error)
1142 goto cleanup_devlist;
1143 error = scsi_init_sysctl();
1144 if (error)
1145 goto cleanup_hosts;
1146 error = scsi_sysfs_register();
1147 if (error)
1148 goto cleanup_sysctl;
1149
1150 scsi_netlink_init();
1151
1152 printk(KERN_NOTICE "SCSI subsystem initialized\n");
1153 return 0;
1154
1155cleanup_sysctl:
1156 scsi_exit_sysctl();
1157cleanup_hosts:
1158 scsi_exit_hosts();
1159cleanup_devlist:
1160 scsi_exit_devinfo();
1161cleanup_procfs:
1162 scsi_exit_procfs();
1163cleanup_queue:
1164 scsi_exit_queue();
1165 printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
1166 -error);
1167 return error;
1168}
1169
1170static void __exit exit_scsi(void)
1171{
1172 scsi_netlink_exit();
1173 scsi_sysfs_unregister();
1174 scsi_exit_sysctl();
1175 scsi_exit_hosts();
1176 scsi_exit_devinfo();
1177 scsi_exit_procfs();
1178 scsi_exit_queue();
1179}
1180
1181subsys_initcall(init_scsi);
1182module_exit(exit_scsi);