]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/scsi.c
scsi: convert target_busy to an atomic_t
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / scsi.c
CommitLineData
1da177e4
LT
1/*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 * Copyright (C) 2002, 2003 Christoph Hellwig
5 *
6 * generic mid-level SCSI driver
7 * Initial versions: Drew Eckhardt
8 * Subsequent revisions: Eric Youngdale
9 *
10 * <drew@colorado.edu>
11 *
12 * Bug correction thanks go to :
13 * Rik Faith <faith@cs.unc.edu>
14 * Tommy Thorn <tthorn>
15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 *
17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
18 * add scatter-gather, multiple outstanding request, and other
19 * enhancements.
20 *
21 * Native multichannel, wide scsi, /proc/scsi and hot plugging
22 * support added by Michael Neuffer <mike@i-connect.net>
23 *
24 * Added request_module("scsi_hostadapter") for kerneld:
25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
26 * Bjorn Ekwall <bj0rn@blox.se>
27 * (changed to kmod)
28 *
29 * Major improvements to the timeout, abort, and reset processing,
30 * as well as performance modifications for large queue depths by
31 * Leonard N. Zubkoff <lnz@dandelion.com>
32 *
33 * Converted cli() code to spinlocks, Ingo Molnar
34 *
35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 *
37 * out_of_space hacks, D. Gilbert (dpg) 990608
38 */
39
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42#include <linux/kernel.h>
1da177e4
LT
43#include <linux/timer.h>
44#include <linux/string.h>
45#include <linux/slab.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h>
48#include <linux/init.h>
49#include <linux/completion.h>
1da177e4
LT
50#include <linux/unistd.h>
51#include <linux/spinlock.h>
52#include <linux/kmod.h>
53#include <linux/interrupt.h>
54#include <linux/notifier.h>
55#include <linux/cpu.h>
0b950672 56#include <linux/mutex.h>
2955b47d 57#include <linux/async.h>
3c6bdaea 58#include <asm/unaligned.h>
1da177e4
LT
59
60#include <scsi/scsi.h>
61#include <scsi/scsi_cmnd.h>
62#include <scsi/scsi_dbg.h>
63#include <scsi/scsi_device.h>
7b3d9545 64#include <scsi/scsi_driver.h>
1da177e4
LT
65#include <scsi/scsi_eh.h>
66#include <scsi/scsi_host.h>
67#include <scsi/scsi_tcq.h>
1da177e4
LT
68
69#include "scsi_priv.h"
70#include "scsi_logging.h"
71
bf816235
KT
72#define CREATE_TRACE_POINTS
73#include <trace/events/scsi.h>
74
1da177e4
LT
75/*
76 * Definitions and constants.
77 */
78
1da177e4
LT
79/*
80 * Note - the initial logging level can be set here to log events at boot time.
81 * After the system is up, you may enable logging via the /proc interface.
82 */
83unsigned int scsi_logging_level;
84#if defined(CONFIG_SCSI_LOGGING)
85EXPORT_SYMBOL(scsi_logging_level);
86#endif
87
ea80dade 88/* sd, scsi core and power management need to coordinate flushing async actions */
2955b47d 89ASYNC_DOMAIN(scsi_sd_probe_domain);
a7a20d10 90EXPORT_SYMBOL(scsi_sd_probe_domain);
a7a20d10 91
3c31b52f
DW
92/*
93 * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
94 * asynchronous system resume operations. It is marked 'exclusive' to avoid
95 * being included in the async_synchronize_full() that is invoked by
96 * dpm_resume()
97 */
98ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
99EXPORT_SYMBOL(scsi_sd_pm_domain);
100
8a1cdc9c
MW
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
 * You may not alter any existing entry (although adding new ones is
 * encouraged once assigned by ANSI/INCITS T10
 *
 * Each entry is padded to exactly 17 characters so that /proc output
 * lines up in fixed columns.
 */
static const char *const scsi_device_types[] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"ASC IT8          ",
	"ASC IT8          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
	"Optical card     ",
	"Bridge controller",
	"Object storage   ",
	"Automation/Drive ",
};
4ff36718 126
eb44820c
RL
127/**
128 * scsi_device_type - Return 17 char string indicating device type.
129 * @type: type number to look up
130 */
131
4ff36718
MW
132const char * scsi_device_type(unsigned type)
133{
134 if (type == 0x1e)
135 return "Well-known LUN ";
136 if (type == 0x1f)
137 return "No Device ";
80c6e3c0 138 if (type >= ARRAY_SIZE(scsi_device_types))
4ff36718
MW
139 return "Unknown ";
140 return scsi_device_types[type];
141}
142
143EXPORT_SYMBOL(scsi_device_type);
1da177e4 144
1da177e4 145struct scsi_host_cmd_pool {
5b7f1680
JB
146 struct kmem_cache *cmd_slab;
147 struct kmem_cache *sense_slab;
148 unsigned int users;
149 char *cmd_name;
150 char *sense_name;
151 unsigned int slab_flags;
152 gfp_t gfp_mask;
1da177e4
LT
153};
154
155static struct scsi_host_cmd_pool scsi_cmd_pool = {
5b7f1680
JB
156 .cmd_name = "scsi_cmd_cache",
157 .sense_name = "scsi_sense_cache",
1da177e4
LT
158 .slab_flags = SLAB_HWCACHE_ALIGN,
159};
160
161static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
5b7f1680
JB
162 .cmd_name = "scsi_cmd_cache(DMA)",
163 .sense_name = "scsi_sense_cache(DMA)",
1da177e4
LT
164 .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
165 .gfp_mask = __GFP_DMA,
166};
167
0b950672 168static DEFINE_MUTEX(host_cmd_pool_mutex);
1da177e4 169
e507e30b 170/**
7c283341
CH
171 * scsi_host_free_command - internal function to release a command
172 * @shost: host to free the command for
e507e30b
JB
173 * @cmd: command to release
174 *
175 * the command must previously have been allocated by
7c283341 176 * scsi_host_alloc_command.
e507e30b
JB
177 */
178static void
7c283341 179scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
e507e30b 180{
7c283341
CH
181 struct scsi_host_cmd_pool *pool = shost->cmd_pool;
182
7027ad72
MP
183 if (cmd->prot_sdb)
184 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
e507e30b
JB
185 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
186 kmem_cache_free(pool->cmd_slab, cmd);
187}
188
7027ad72
MP
189/**
190 * scsi_host_alloc_command - internal function to allocate command
191 * @shost: SCSI host whose pool to allocate from
192 * @gfp_mask: mask for the allocation
193 *
194 * Returns a fully allocated command with sense buffer and protection
195 * data buffer (where applicable) or NULL on failure
196 */
197static struct scsi_cmnd *
198scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
199{
7c283341 200 struct scsi_host_cmd_pool *pool = shost->cmd_pool;
7027ad72
MP
201 struct scsi_cmnd *cmd;
202
7c283341 203 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
7027ad72 204 if (!cmd)
7c283341
CH
205 goto fail;
206
207 cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
208 gfp_mask | pool->gfp_mask);
209 if (!cmd->sense_buffer)
210 goto fail_free_cmd;
7027ad72
MP
211
212 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
213 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
7c283341
CH
214 if (!cmd->prot_sdb)
215 goto fail_free_sense;
7027ad72
MP
216 }
217
218 return cmd;
7c283341
CH
219
220fail_free_sense:
221 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
222fail_free_cmd:
223 kmem_cache_free(pool->cmd_slab, cmd);
224fail:
225 return NULL;
7027ad72
MP
226}
227
eb44820c
RL
228/**
229 * __scsi_get_command - Allocate a struct scsi_cmnd
230 * @shost: host to transmit command
231 * @gfp_mask: allocation mask
232 *
233 * Description: allocate a struct scsi_cmd from host's slab, recycling from the
234 * host's free_list if necessary.
235 */
f1bea55d
CH
236static struct scsi_cmnd *
237__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
1da177e4 238{
b4c2554d 239 struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
164fc5dc 240
1da177e4
LT
241 if (unlikely(!cmd)) {
242 unsigned long flags;
243
244 spin_lock_irqsave(&shost->free_list_lock, flags);
245 if (likely(!list_empty(&shost->free_list))) {
246 cmd = list_entry(shost->free_list.next,
247 struct scsi_cmnd, list);
248 list_del_init(&cmd->list);
249 }
250 spin_unlock_irqrestore(&shost->free_list_lock, flags);
de25deb1
FT
251
252 if (cmd) {
b4c2554d
MP
253 void *buf, *prot;
254
de25deb1 255 buf = cmd->sense_buffer;
b4c2554d
MP
256 prot = cmd->prot_sdb;
257
de25deb1 258 memset(cmd, 0, sizeof(*cmd));
b4c2554d 259
de25deb1 260 cmd->sense_buffer = buf;
b4c2554d 261 cmd->prot_sdb = prot;
de25deb1 262 }
1da177e4
LT
263 }
264
265 return cmd;
266}
267
eb44820c
RL
268/**
269 * scsi_get_command - Allocate and setup a scsi command block
270 * @dev: parent scsi device
271 * @gfp_mask: allocator flags
1da177e4
LT
272 *
273 * Returns: The allocated scsi command structure.
274 */
c53033f6 275struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
1da177e4 276{
04796336
CH
277 struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
278 unsigned long flags;
1da177e4 279
04796336 280 if (unlikely(cmd == NULL))
1da177e4
LT
281 return NULL;
282
04796336
CH
283 cmd->device = dev;
284 INIT_LIST_HEAD(&cmd->list);
285 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
286 spin_lock_irqsave(&dev->list_lock, flags);
287 list_add_tail(&cmd->list, &dev->cmd_list);
288 spin_unlock_irqrestore(&dev->list_lock, flags);
289 cmd->jiffies_at_alloc = jiffies;
1da177e4 290 return cmd;
b58d9154 291}
1da177e4 292
eb44820c
RL
293/**
294 * __scsi_put_command - Free a struct scsi_cmnd
295 * @shost: dev->host
296 * @cmd: Command to free
eb44820c 297 */
f1bea55d 298static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
b58d9154
FT
299{
300 unsigned long flags;
301
b58d9154 302 if (unlikely(list_empty(&shost->free_list))) {
44b93b59
CH
303 spin_lock_irqsave(&shost->free_list_lock, flags);
304 if (list_empty(&shost->free_list)) {
305 list_add(&cmd->list, &shost->free_list);
306 cmd = NULL;
307 }
308 spin_unlock_irqrestore(&shost->free_list_lock, flags);
b58d9154 309 }
b58d9154 310
e507e30b 311 if (likely(cmd != NULL))
7c283341 312 scsi_host_free_command(shost, cmd);
b58d9154 313}
b58d9154 314
eb44820c
RL
315/**
316 * scsi_put_command - Free a scsi command block
317 * @cmd: command block to free
1da177e4
LT
318 *
319 * Returns: Nothing.
320 *
321 * Notes: The command must not belong to any lists.
322 */
323void scsi_put_command(struct scsi_cmnd *cmd)
324{
1da177e4 325 unsigned long flags;
b58d9154 326
1da177e4
LT
327 /* serious error if the command hasn't come from a device list */
328 spin_lock_irqsave(&cmd->device->list_lock, flags);
329 BUG_ON(list_empty(&cmd->list));
330 list_del_init(&cmd->list);
b58d9154 331 spin_unlock_irqrestore(&cmd->device->list_lock, flags);
1da177e4 332
fcc95a76 333 BUG_ON(delayed_work_pending(&cmd->abort_work));
e494f6a7 334
04796336 335 __scsi_put_command(cmd->device->host, cmd);
1da177e4 336}
1da177e4 337
89d9a567
CH
338static struct scsi_host_cmd_pool *
339scsi_find_host_cmd_pool(struct Scsi_Host *shost)
1da177e4 340{
89d9a567
CH
341 if (shost->hostt->cmd_size)
342 return shost->hostt->cmd_pool;
343 if (shost->unchecked_isa_dma)
344 return &scsi_cmd_dma_pool;
345 return &scsi_cmd_pool;
346}
347
348static void
349scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
350{
351 kfree(pool->sense_name);
352 kfree(pool->cmd_name);
353 kfree(pool);
354}
355
356static struct scsi_host_cmd_pool *
357scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
358{
359 struct scsi_host_template *hostt = shost->hostt;
360 struct scsi_host_cmd_pool *pool;
361
362 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
363 if (!pool)
364 return NULL;
365
366 pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name);
367 pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name);
368 if (!pool->cmd_name || !pool->sense_name) {
369 scsi_free_host_cmd_pool(pool);
370 return NULL;
371 }
372
373 pool->slab_flags = SLAB_HWCACHE_ALIGN;
374 if (shost->unchecked_isa_dma) {
375 pool->slab_flags |= SLAB_CACHE_DMA;
376 pool->gfp_mask = __GFP_DMA;
377 }
378 return pool;
379}
380
381static struct scsi_host_cmd_pool *
382scsi_get_host_cmd_pool(struct Scsi_Host *shost)
383{
384 struct scsi_host_template *hostt = shost->hostt;
1c353f7d 385 struct scsi_host_cmd_pool *retval = NULL, *pool;
89d9a567
CH
386 size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
387
1da177e4
LT
388 /*
389 * Select a command slab for this host and create it if not
eb44820c 390 * yet existent.
1da177e4 391 */
0b950672 392 mutex_lock(&host_cmd_pool_mutex);
89d9a567
CH
393 pool = scsi_find_host_cmd_pool(shost);
394 if (!pool) {
395 pool = scsi_alloc_host_cmd_pool(shost);
396 if (!pool)
397 goto out;
398 }
399
1da177e4 400 if (!pool->users) {
89d9a567 401 pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
5b7f1680
JB
402 pool->slab_flags, NULL);
403 if (!pool->cmd_slab)
89d9a567 404 goto out_free_pool;
5b7f1680
JB
405
406 pool->sense_slab = kmem_cache_create(pool->sense_name,
407 SCSI_SENSE_BUFFERSIZE, 0,
408 pool->slab_flags, NULL);
89d9a567
CH
409 if (!pool->sense_slab)
410 goto out_free_slab;
1da177e4
LT
411 }
412
413 pool->users++;
1c353f7d 414 retval = pool;
89d9a567 415out:
0b950672 416 mutex_unlock(&host_cmd_pool_mutex);
1c353f7d 417 return retval;
89d9a567
CH
418
419out_free_slab:
420 kmem_cache_destroy(pool->cmd_slab);
421out_free_pool:
422 if (hostt->cmd_size)
423 scsi_free_host_cmd_pool(pool);
424 goto out;
1c353f7d
JB
425}
426
89d9a567 427static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
1c353f7d 428{
89d9a567 429 struct scsi_host_template *hostt = shost->hostt;
1c353f7d 430 struct scsi_host_cmd_pool *pool;
1da177e4 431
1c353f7d 432 mutex_lock(&host_cmd_pool_mutex);
89d9a567
CH
433 pool = scsi_find_host_cmd_pool(shost);
434
1da177e4 435 /*
1c353f7d
JB
436 * This may happen if a driver has a mismatched get and put
437 * of the command pool; the driver should be implicated in
438 * the stack trace
1da177e4 439 */
1c353f7d 440 BUG_ON(pool->users == 0);
de25deb1 441
5b7f1680
JB
442 if (!--pool->users) {
443 kmem_cache_destroy(pool->cmd_slab);
444 kmem_cache_destroy(pool->sense_slab);
89d9a567
CH
445 if (hostt->cmd_size)
446 scsi_free_host_cmd_pool(pool);
5b7f1680 447 }
0b950672 448 mutex_unlock(&host_cmd_pool_mutex);
1c353f7d
JB
449}
450
1c353f7d
JB
451/**
452 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
453 * @shost: host to allocate the freelist for.
454 *
455 * Description: The command freelist protects against system-wide out of memory
456 * deadlock by preallocating one SCSI command structure for each host, so the
457 * system can always write to a swap file on a device associated with that host.
458 *
459 * Returns: Nothing.
460 */
461int scsi_setup_command_freelist(struct Scsi_Host *shost)
462{
1c353f7d 463 const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
89d9a567 464 struct scsi_cmnd *cmd;
1c353f7d
JB
465
466 spin_lock_init(&shost->free_list_lock);
467 INIT_LIST_HEAD(&shost->free_list);
468
89d9a567 469 shost->cmd_pool = scsi_get_host_cmd_pool(shost);
1c353f7d
JB
470 if (!shost->cmd_pool)
471 return -ENOMEM;
472
473 /*
474 * Get one backup command for this host.
475 */
7027ad72 476 cmd = scsi_host_alloc_command(shost, gfp_mask);
1c353f7d 477 if (!cmd) {
89d9a567 478 scsi_put_host_cmd_pool(shost);
61d7416a 479 shost->cmd_pool = NULL;
1c353f7d
JB
480 return -ENOMEM;
481 }
482 list_add(&cmd->list, &shost->free_list);
483 return 0;
1da177e4
LT
484}
485
eb44820c
RL
486/**
487 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
488 * @shost: host whose freelist is going to be destroyed
1da177e4
LT
489 */
490void scsi_destroy_command_freelist(struct Scsi_Host *shost)
491{
61d7416a
AB
492 /*
493 * If cmd_pool is NULL the free list was not initialized, so
494 * do not attempt to release resources.
495 */
496 if (!shost->cmd_pool)
497 return;
498
1da177e4
LT
499 while (!list_empty(&shost->free_list)) {
500 struct scsi_cmnd *cmd;
501
502 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
503 list_del_init(&cmd->list);
7c283341 504 scsi_host_free_command(shost, cmd);
1da177e4 505 }
1c353f7d 506 shost->cmd_pool = NULL;
89d9a567 507 scsi_put_host_cmd_pool(shost);
de25deb1
FT
508}
509
1da177e4
LT
510#ifdef CONFIG_SCSI_LOGGING
511void scsi_log_send(struct scsi_cmnd *cmd)
512{
513 unsigned int level;
1da177e4
LT
514
515 /*
516 * If ML QUEUE log level is greater than or equal to:
517 *
518 * 1: nothing (match completion)
519 *
520 * 2: log opcode + command of all commands
521 *
522 * 3: same as 2 plus dump cmd address
523 *
524 * 4: same as 3 plus dump extra junk
525 */
526 if (unlikely(scsi_logging_level)) {
527 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
528 SCSI_LOG_MLQUEUE_BITS);
529 if (level > 1) {
a4d04a4c 530 scmd_printk(KERN_INFO, cmd, "Send: ");
1da177e4
LT
531 if (level > 2)
532 printk("0x%p ", cmd);
a4d04a4c 533 printk("\n");
1da177e4
LT
534 scsi_print_command(cmd);
535 if (level > 3) {
536 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
7b3d9545 537 " queuecommand 0x%p\n",
a73e45b3 538 scsi_sglist(cmd), scsi_bufflen(cmd),
a4d04a4c 539 cmd->device->host->hostt->queuecommand);
1da177e4
LT
540
541 }
542 }
543 }
544}
545
546void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
547{
548 unsigned int level;
1da177e4
LT
549
550 /*
551 * If ML COMPLETE log level is greater than or equal to:
552 *
553 * 1: log disposition, result, opcode + command, and conditionally
554 * sense data for failures or non SUCCESS dispositions.
555 *
556 * 2: same as 1 but for all command completions.
557 *
558 * 3: same as 2 plus dump cmd address
559 *
560 * 4: same as 3 plus dump extra junk
561 */
562 if (unlikely(scsi_logging_level)) {
563 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
564 SCSI_LOG_MLCOMPLETE_BITS);
565 if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
566 (level > 1)) {
a4d04a4c 567 scmd_printk(KERN_INFO, cmd, "Done: ");
1da177e4
LT
568 if (level > 2)
569 printk("0x%p ", cmd);
570 /*
571 * Dump truncated values, so we usually fit within
572 * 80 chars.
573 */
574 switch (disposition) {
575 case SUCCESS:
a4d04a4c 576 printk("SUCCESS\n");
1da177e4
LT
577 break;
578 case NEEDS_RETRY:
a4d04a4c 579 printk("RETRY\n");
1da177e4
LT
580 break;
581 case ADD_TO_MLQUEUE:
a4d04a4c 582 printk("MLQUEUE\n");
1da177e4
LT
583 break;
584 case FAILED:
a4d04a4c 585 printk("FAILED\n");
1da177e4
LT
586 break;
587 case TIMEOUT_ERROR:
588 /*
589 * If called via scsi_times_out.
590 */
a4d04a4c 591 printk("TIMEOUT\n");
1da177e4
LT
592 break;
593 default:
a4d04a4c 594 printk("UNKNOWN\n");
1da177e4 595 }
a4d04a4c 596 scsi_print_result(cmd);
1da177e4 597 scsi_print_command(cmd);
a4d04a4c 598 if (status_byte(cmd->result) & CHECK_CONDITION)
1da177e4 599 scsi_print_sense("", cmd);
a4d04a4c
MP
600 if (level > 3)
601 scmd_printk(KERN_INFO, cmd,
602 "scsi host busy %d failed %d\n",
603 cmd->device->host->host_busy,
604 cmd->device->host->host_failed);
1da177e4
LT
605 }
606 }
607}
608#endif
609
eb44820c
RL
610/**
611 * scsi_cmd_get_serial - Assign a serial number to a command
612 * @host: the scsi host
613 * @cmd: command to assign serial number to
614 *
615 * Description: a serial number identifies a request for error recovery
1da177e4
LT
616 * and debugging purposes. Protected by the Host_Lock of host.
617 */
f281233d 618void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1da177e4
LT
619{
620 cmd->serial_number = host->cmd_serial_number++;
621 if (cmd->serial_number == 0)
622 cmd->serial_number = host->cmd_serial_number++;
1da177e4 623}
f281233d 624EXPORT_SYMBOL(scsi_cmd_get_serial);
1da177e4 625
eb44820c
RL
626/**
627 * scsi_dispatch_command - Dispatch a command to the low-level driver.
628 * @cmd: command block we are dispatching.
1da177e4 629 *
eb44820c
RL
630 * Return: nonzero return request was rejected and device's queue needs to be
631 * plugged.
1da177e4
LT
632 */
633int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
634{
635 struct Scsi_Host *host = cmd->device->host;
1da177e4
LT
636 int rtn = 0;
637
242f9dcb
JA
638 atomic_inc(&cmd->device->iorequest_cnt);
639
1da177e4
LT
640 /* check if the device is still usable */
641 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
642 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
643 * returns an immediate error upwards, and signals
644 * that the device is no longer present */
645 cmd->result = DID_NO_CONNECT << 16;
d0d3bbf9 646 goto done;
1da177e4
LT
647 }
648
0f1d87a2
JB
649 /* Check to see if the scsi lld made this device blocked. */
650 if (unlikely(scsi_device_blocked(cmd->device))) {
91921e01 651 /*
0f1d87a2
JB
652 * in blocked state, the command is just put back on
653 * the device queue. The suspend state has already
654 * blocked the queue so future requests should not
655 * occur until the device transitions out of the
656 * suspend state.
1da177e4 657 */
91921e01
HR
658 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
659 "queuecommand : device blocked\n"));
d0d3bbf9 660 return SCSI_MLQUEUE_DEVICE_BUSY;
1da177e4
LT
661 }
662
91921e01 663 /*
1da177e4
LT
664 * If SCSI-2 or lower, store the LUN value in cmnd.
665 */
4d7db04a
JB
666 if (cmd->device->scsi_level <= SCSI_2 &&
667 cmd->device->scsi_level != SCSI_UNKNOWN) {
1da177e4
LT
668 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
669 (cmd->device->lun << 5 & 0xe0);
670 }
671
1da177e4
LT
672 scsi_log_send(cmd);
673
1da177e4
LT
674 /*
675 * Before we queue this command, check if the command
676 * length exceeds what the host adapter can handle.
677 */
db4742dd 678 if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
91921e01
HR
679 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
680 "queuecommand : command too long. "
db4742dd
BH
681 "cdb_size=%d host->max_cmd_len=%d\n",
682 cmd->cmd_len, cmd->device->host->max_cmd_len));
1da177e4 683 cmd->result = (DID_ABORT << 16);
d0d3bbf9 684 goto done;
1da177e4
LT
685 }
686
d2c9d9ea 687 if (unlikely(host->shost_state == SHOST_DEL)) {
1da177e4 688 cmd->result = (DID_NO_CONNECT << 16);
d0d3bbf9
CH
689 goto done;
690
1da177e4 691 }
f281233d 692
d0d3bbf9 693 trace_scsi_dispatch_cmd_start(cmd);
d0d3bbf9 694 rtn = host->hostt->queuecommand(host, cmd);
1da177e4 695 if (rtn) {
bf816235 696 trace_scsi_dispatch_cmd_error(cmd, rtn);
f0c0a376
MC
697 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
698 rtn != SCSI_MLQUEUE_TARGET_BUSY)
699 rtn = SCSI_MLQUEUE_HOST_BUSY;
700
91921e01
HR
701 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
702 "queuecommand : request rejected\n"));
1da177e4
LT
703 }
704
1da177e4 705 return rtn;
d0d3bbf9 706 done:
3b5382c4 707 cmd->scsi_done(cmd);
d0d3bbf9 708 return 0;
1da177e4
LT
709}
710
eb44820c
RL
711/**
712 * scsi_finish_command - cleanup and pass command back to upper layer
713 * @cmd: the command
1da177e4 714 *
eb44820c 715 * Description: Pass command off to upper layer for finishing of I/O
1da177e4
LT
716 * request, waking processes that are waiting on results,
717 * etc.
718 */
719void scsi_finish_command(struct scsi_cmnd *cmd)
720{
721 struct scsi_device *sdev = cmd->device;
f0c0a376 722 struct scsi_target *starget = scsi_target(sdev);
1da177e4 723 struct Scsi_Host *shost = sdev->host;
7b3d9545
LT
724 struct scsi_driver *drv;
725 unsigned int good_bytes;
1da177e4
LT
726
727 scsi_device_unbusy(sdev);
728
729 /*
730 * Clear the flags which say that the device/host is no longer
731 * capable of accepting new commands. These are set in scsi_queue.c
732 * for both the queue full condition on a device, and for a
733 * host full condition on the host.
734 *
735 * XXX(hch): What about locking?
736 */
737 shost->host_blocked = 0;
f0c0a376 738 starget->target_blocked = 0;
1da177e4
LT
739 sdev->device_blocked = 0;
740
741 /*
742 * If we have valid sense information, then some kind of recovery
743 * must have taken place. Make a note of this.
744 */
745 if (SCSI_SENSE_VALID(cmd))
746 cmd->result |= (DRIVER_SENSE << 24);
747
3bf743e7
JG
748 SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
749 "Notifying upper driver of completion "
750 "(result %x)\n", cmd->result));
1da177e4 751
f18573ab 752 good_bytes = scsi_bufflen(cmd);
7b3d9545 753 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
427e59f0 754 int old_good_bytes = good_bytes;
7b3d9545
LT
755 drv = scsi_cmd_to_driver(cmd);
756 if (drv->done)
757 good_bytes = drv->done(cmd);
427e59f0
JB
758 /*
759 * USB may not give sense identifying bad sector and
760 * simply return a residue instead, so subtract off the
761 * residue if drv->done() error processing indicates no
762 * change to the completion length.
763 */
764 if (good_bytes == old_good_bytes)
765 good_bytes -= scsi_get_resid(cmd);
7b3d9545
LT
766 }
767 scsi_io_completion(cmd, good_bytes);
1da177e4 768}
1da177e4 769
eb44820c
RL
770/**
771 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
772 * @sdev: SCSI Device in question
773 * @tagged: Do we use tagged queueing (non-0) or do we treat
774 * this device as an untagged device (0)
775 * @tags: Number of tags allowed if tagged queueing enabled,
776 * or number of commands the low level driver can
777 * queue up in non-tagged mode (as per cmd_per_lun).
1da177e4
LT
778 *
779 * Returns: Nothing
780 *
781 * Lock Status: None held on entry
782 *
783 * Notes: Low level drivers may call this at any time and we will do
784 * the right thing depending on whether or not the device is
785 * currently active and whether or not it even has the
786 * command blocks built yet.
787 */
788void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
789{
790 unsigned long flags;
791
792 /*
793 * refuse to set tagged depth to an unworkable size
794 */
795 if (tags <= 0)
796 return;
797
798 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
799
885ace9e
MC
800 /*
801 * Check to see if the queue is managed by the block layer.
802 * If it is, and we fail to adjust the depth, exit.
803 *
804 * Do not resize the tag map if it is a host wide share bqt,
805 * because the size should be the hosts's can_queue. If there
806 * is more IO than the LLD's can_queue (so there are not enuogh
807 * tags) request_fn's host queue ready check will handle it.
808 */
809 if (!sdev->host->bqt) {
810 if (blk_queue_tagged(sdev->request_queue) &&
811 blk_queue_resize_tags(sdev->request_queue, tags) != 0)
812 goto out;
813 }
1da177e4
LT
814
815 sdev->queue_depth = tags;
816 switch (tagged) {
cb23f912
DG
817 case 0:
818 sdev->ordered_tags = 0;
819 sdev->simple_tags = 0;
820 break;
1da177e4
LT
821 case MSG_ORDERED_TAG:
822 sdev->ordered_tags = 1;
823 sdev->simple_tags = 1;
824 break;
825 case MSG_SIMPLE_TAG:
826 sdev->ordered_tags = 0;
827 sdev->simple_tags = 1;
828 break;
829 default:
cb23f912
DG
830 sdev->ordered_tags = 0;
831 sdev->simple_tags = 0;
9ccfc756
JB
832 sdev_printk(KERN_WARNING, sdev,
833 "scsi_adjust_queue_depth, bad queue type, "
834 "disabled\n");
1da177e4
LT
835 }
836 out:
837 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
838}
839EXPORT_SYMBOL(scsi_adjust_queue_depth);
840
eb44820c
RL
841/**
842 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
843 * @sdev: SCSI Device in question
844 * @depth: Current number of outstanding SCSI commands on this device,
845 * not counting the one returned as QUEUE_FULL.
1da177e4 846 *
eb44820c 847 * Description: This function will track successive QUEUE_FULL events on a
1da177e4
LT
848 * specific SCSI device to determine if and when there is a
849 * need to adjust the queue depth on the device.
850 *
eb44820c 851 * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
1da177e4
LT
852 * -1 - Drop back to untagged operation using host->cmd_per_lun
853 * as the untagged command depth
854 *
855 * Lock Status: None held on entry
856 *
857 * Notes: Low level drivers may call this at any time and we will do
858 * "The Right Thing." We are interrupt context safe.
859 */
860int scsi_track_queue_full(struct scsi_device *sdev, int depth)
861{
4a84067d
VD
862
863 /*
864 * Don't let QUEUE_FULLs on the same
865 * jiffies count, they could all be from
866 * same event.
867 */
868 if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
1da177e4
LT
869 return 0;
870
4a84067d 871 sdev->last_queue_full_time = jiffies;
1da177e4
LT
872 if (sdev->last_queue_full_depth != depth) {
873 sdev->last_queue_full_count = 1;
874 sdev->last_queue_full_depth = depth;
875 } else {
876 sdev->last_queue_full_count++;
877 }
878
879 if (sdev->last_queue_full_count <= 10)
880 return 0;
881 if (sdev->last_queue_full_depth < 8) {
882 /* Drop back to untagged */
883 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
884 return -1;
885 }
886
887 if (sdev->ordered_tags)
888 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
889 else
890 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
891 return depth;
892}
893EXPORT_SYMBOL(scsi_track_queue_full);
894
881a256d
MW
895/**
896 * scsi_vpd_inquiry - Request a device provide us with a VPD page
897 * @sdev: The device to ask
898 * @buffer: Where to put the result
899 * @page: Which Vital Product Data to return
900 * @len: The length of the buffer
901 *
902 * This is an internal helper function. You probably want to use
903 * scsi_get_vpd_page instead.
904 *
bc8945df 905 * Returns size of the vpd page on success or a negative error number.
881a256d
MW
906 */
907static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
908 u8 page, unsigned len)
909{
910 int result;
911 unsigned char cmd[16];
912
bc8945df
HR
913 if (len < 4)
914 return -EINVAL;
915
881a256d
MW
916 cmd[0] = INQUIRY;
917 cmd[1] = 1; /* EVPD */
918 cmd[2] = page;
919 cmd[3] = len >> 8;
920 cmd[4] = len & 0xff;
921 cmd[5] = 0; /* Control byte */
922
923 /*
924 * I'm not convinced we need to try quite this hard to get VPD, but
925 * all the existing users tried this hard.
926 */
927 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
95a3639e 928 len, NULL, 30 * HZ, 3, NULL);
881a256d 929 if (result)
bc8945df 930 return -EIO;
881a256d
MW
931
932 /* Sanity check that we got the page back that we asked for */
933 if (buffer[1] != page)
934 return -EIO;
935
bc8945df 936 return get_unaligned_be16(&buffer[2]) + 4;
881a256d
MW
937}
938
939/**
940 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
941 * @sdev: The device to ask
942 * @page: Which Vital Product Data to return
786f8ba2
RD
943 * @buf: where to store the VPD
944 * @buf_len: number of bytes in the VPD buffer area
881a256d
MW
945 *
946 * SCSI devices may optionally supply Vital Product Data. Each 'page'
947 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
948 * If the device supports this VPD page, this routine returns a pointer
949 * to a buffer containing the data from that page. The caller is
950 * responsible for calling kfree() on this pointer when it is no longer
951 * needed. If we cannot retrieve the VPD page this routine returns %NULL.
952 */
e3deec09
JB
953int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
954 int buf_len)
881a256d
MW
955{
956 int i, result;
881a256d 957
7562523e
MP
958 if (sdev->skip_vpd_pages)
959 goto fail;
960
881a256d 961 /* Ask for all the pages supported by this device */
e3deec09 962 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
bc8945df 963 if (result < 4)
881a256d
MW
964 goto fail;
965
966 /* If the user actually wanted this page, we can skip the rest */
967 if (page == 0)
16d3ea26 968 return 0;
881a256d 969
bc8945df
HR
970 for (i = 4; i < min(result, buf_len); i++)
971 if (buf[i] == page)
881a256d 972 goto found;
e3deec09 973
bc8945df 974 if (i < result && i >= buf_len)
e3deec09
JB
975 /* ran off the end of the buffer, give us benefit of doubt */
976 goto found;
881a256d
MW
977 /* The device claims it doesn't support the requested page */
978 goto fail;
979
980 found:
e3deec09 981 result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
bc8945df 982 if (result < 0)
881a256d
MW
983 goto fail;
984
e3deec09 985 return 0;
881a256d
MW
986
987 fail:
e3deec09 988 return -EINVAL;
881a256d
MW
989}
990EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
991
b3ae8780
HR
992/**
993 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
994 * @sdev: The device to ask
995 *
996 * Attach the 'Device Identification' VPD page (0x83) and the
997 * 'Unit Serial Number' VPD page (0x80) to a SCSI device
998 * structure. This information can be used to identify the device
999 * uniquely.
1000 */
1001void scsi_attach_vpd(struct scsi_device *sdev)
1002{
1003 int result, i;
1004 int vpd_len = SCSI_VPD_PG_LEN;
1005 int pg80_supported = 0;
1006 int pg83_supported = 0;
1007 unsigned char *vpd_buf;
1008
1009 if (sdev->skip_vpd_pages)
1010 return;
1011retry_pg0:
1012 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1013 if (!vpd_buf)
1014 return;
1015
1016 /* Ask for all the pages supported by this device */
1017 result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
1018 if (result < 0) {
1019 kfree(vpd_buf);
1020 return;
1021 }
1022 if (result > vpd_len) {
1023 vpd_len = result;
1024 kfree(vpd_buf);
1025 goto retry_pg0;
1026 }
1027
1028 for (i = 4; i < result; i++) {
1029 if (vpd_buf[i] == 0x80)
1030 pg80_supported = 1;
1031 if (vpd_buf[i] == 0x83)
1032 pg83_supported = 1;
1033 }
1034 kfree(vpd_buf);
1035 vpd_len = SCSI_VPD_PG_LEN;
1036
1037 if (pg80_supported) {
1038retry_pg80:
1039 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1040 if (!vpd_buf)
1041 return;
1042
1043 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
1044 if (result < 0) {
1045 kfree(vpd_buf);
1046 return;
1047 }
1048 if (result > vpd_len) {
1049 vpd_len = result;
1050 kfree(vpd_buf);
1051 goto retry_pg80;
1052 }
1053 sdev->vpd_pg80_len = result;
1054 sdev->vpd_pg80 = vpd_buf;
1055 vpd_len = SCSI_VPD_PG_LEN;
1056 }
1057
1058 if (pg83_supported) {
1059retry_pg83:
1060 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1061 if (!vpd_buf)
1062 return;
1063
1064 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
1065 if (result < 0) {
1066 kfree(vpd_buf);
1067 return;
1068 }
1069 if (result > vpd_len) {
1070 vpd_len = result;
1071 kfree(vpd_buf);
1072 goto retry_pg83;
1073 }
1074 sdev->vpd_pg83_len = result;
1075 sdev->vpd_pg83 = vpd_buf;
1076 }
1077}
1078
3c6bdaea
MP
1079/**
1080 * scsi_report_opcode - Find out if a given command opcode is supported
1081 * @sdev: scsi device to query
1082 * @buffer: scratch buffer (must be at least 20 bytes long)
1083 * @len: length of buffer
1084 * @opcode: opcode for command to look up
1085 *
1086 * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
66c28f97
MP
1087 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
1088 * unsupported and 1 if the device claims to support the command.
3c6bdaea
MP
1089 */
1090int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
1091 unsigned int len, unsigned char opcode)
1092{
1093 unsigned char cmd[16];
1094 struct scsi_sense_hdr sshdr;
1095 int result;
1096
1097 if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
66c28f97 1098 return -EINVAL;
3c6bdaea
MP
1099
1100 memset(cmd, 0, 16);
1101 cmd[0] = MAINTENANCE_IN;
1102 cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
1103 cmd[2] = 1; /* One command format */
1104 cmd[3] = opcode;
1105 put_unaligned_be32(len, &cmd[6]);
1106 memset(buffer, 0, len);
1107
1108 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1109 &sshdr, 30 * HZ, 3, NULL);
1110
1111 if (result && scsi_sense_valid(&sshdr) &&
1112 sshdr.sense_key == ILLEGAL_REQUEST &&
1113 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
66c28f97 1114 return -EINVAL;
3c6bdaea
MP
1115
1116 if ((buffer[1] & 3) == 3) /* Command supported */
1117 return 1;
1118
1119 return 0;
1120}
1121EXPORT_SYMBOL(scsi_report_opcode);
1122
1da177e4 1123/**
eb44820c 1124 * scsi_device_get - get an additional reference to a scsi_device
1da177e4
LT
1125 * @sdev: device to get a reference to
1126 *
eb44820c 1127 * Description: Gets a reference to the scsi_device and increments the use count
1da177e4
LT
1128 * of the underlying LLDD module. You must hold host_lock of the
1129 * parent Scsi_Host or already have a reference when calling this.
1130 */
1131int scsi_device_get(struct scsi_device *sdev)
1132{
85b6c720 1133 if (sdev->sdev_state == SDEV_DEL)
1da177e4
LT
1134 return -ENXIO;
1135 if (!get_device(&sdev->sdev_gendev))
1136 return -ENXIO;
85b6c720
JB
1137 /* We can fail this if we're doing SCSI operations
1138 * from module exit (like cache flush) */
1139 try_module_get(sdev->host->hostt->module);
1140
1da177e4
LT
1141 return 0;
1142}
1143EXPORT_SYMBOL(scsi_device_get);
1144
1145/**
1146 * scsi_device_put - release a reference to a scsi_device
1147 * @sdev: device to release a reference on.
1148 *
eb44820c
RL
1149 * Description: Release a reference to the scsi_device and decrements the use
1150 * count of the underlying LLDD module. The device is freed once the last
1da177e4
LT
1151 * user vanishes.
1152 */
1153void scsi_device_put(struct scsi_device *sdev)
1154{
504fb37a 1155#ifdef CONFIG_MODULE_UNLOAD
f479ab87
JB
1156 struct module *module = sdev->host->hostt->module;
1157
85b6c720
JB
1158 /* The module refcount will be zero if scsi_device_get()
1159 * was called from a module removal routine */
f479ab87
JB
1160 if (module && module_refcount(module) != 0)
1161 module_put(module);
a506b44b 1162#endif
1da177e4
LT
1163 put_device(&sdev->sdev_gendev);
1164}
1165EXPORT_SYMBOL(scsi_device_put);
1166
/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	/* Resume the walk after @prev, or start at the head of the list */
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to
		 * (scsi_device_get() returns 0 on success, so a zero
		 * return means we hold a reference and stop here) */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* drop the caller's reference on @prev only after unlocking */
	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
1191
1192/**
1193 * starget_for_each_device - helper to walk all devices of a target
1194 * @starget: target whose devices we want to iterate over.
eb44820c
RL
1195 * @data: Opaque passed to each function call.
1196 * @fn: Function to call on each device
1da177e4 1197 *
522939d4 1198 * This traverses over each device of @starget. The devices have
1da177e4
LT
1199 * a reference that must be released by scsi_host_put when breaking
1200 * out of the loop.
1201 */
522939d4 1202void starget_for_each_device(struct scsi_target *starget, void *data,
1da177e4
LT
1203 void (*fn)(struct scsi_device *, void *))
1204{
1205 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1206 struct scsi_device *sdev;
1207
1208 shost_for_each_device(sdev, shost) {
1209 if ((sdev->channel == starget->channel) &&
1210 (sdev->id == starget->id))
1211 fn(sdev, data);
1212 }
1213}
1214EXPORT_SYMBOL(starget_for_each_device);
1215
522939d4 1216/**
14f501a4 1217 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
522939d4 1218 * @starget: target whose devices we want to iterate over.
14f501a4
RD
1219 * @data: parameter for callback @fn()
1220 * @fn: callback function that is invoked for each device
522939d4
MR
1221 *
1222 * This traverses over each device of @starget. It does _not_
1223 * take a reference on the scsi_device, so the whole loop must be
1224 * protected by shost->host_lock.
1225 *
1226 * Note: The only reason why drivers would want to use this is because
1227 * they need to access the device list in irq context. Otherwise you
1228 * really want to use starget_for_each_device instead.
1229 **/
1230void __starget_for_each_device(struct scsi_target *starget, void *data,
1231 void (*fn)(struct scsi_device *, void *))
1232{
1233 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1234 struct scsi_device *sdev;
1235
1236 __shost_for_each_device(sdev, shost) {
1237 if ((sdev->channel == starget->channel) &&
1238 (sdev->id == starget->id))
1239 fn(sdev, data);
1240 }
1241}
1242EXPORT_SYMBOL(__starget_for_each_device);
1243
1da177e4
LT
1244/**
1245 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
1246 * @starget: SCSI target pointer
1247 * @lun: SCSI Logical Unit Number
1248 *
eb44820c
RL
1249 * Description: Looks up the scsi_device with the specified @lun for a given
1250 * @starget. The returned scsi_device does not have an additional
1da177e4 1251 * reference. You must hold the host's host_lock over this call and
32aeef60
HR
1252 * any access to the returned scsi_device. A scsi_device in state
1253 * SDEV_DEL is skipped.
1da177e4 1254 *
dc8875e1 1255 * Note: The only reason why drivers should use this is because
eb44820c 1256 * they need to access the device list in irq context. Otherwise you
1da177e4
LT
1257 * really want to use scsi_device_lookup_by_target instead.
1258 **/
1259struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
9cb78c16 1260 u64 lun)
1da177e4
LT
1261{
1262 struct scsi_device *sdev;
1263
1264 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
32aeef60
HR
1265 if (sdev->sdev_state == SDEV_DEL)
1266 continue;
1da177e4
LT
1267 if (sdev->lun ==lun)
1268 return sdev;
1269 }
1270
1271 return NULL;
1272}
1273EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1274
1275/**
1276 * scsi_device_lookup_by_target - find a device given the target
1277 * @starget: SCSI target pointer
1278 * @lun: SCSI Logical Unit Number
1279 *
477e608c
BZ
1280 * Description: Looks up the scsi_device with the specified @lun for a given
1281 * @starget. The returned scsi_device has an additional reference that
eb44820c 1282 * needs to be released with scsi_device_put once you're done with it.
1da177e4
LT
1283 **/
1284struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
9cb78c16 1285 u64 lun)
1da177e4
LT
1286{
1287 struct scsi_device *sdev;
1288 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1289 unsigned long flags;
1290
1291 spin_lock_irqsave(shost->host_lock, flags);
1292 sdev = __scsi_device_lookup_by_target(starget, lun);
1293 if (sdev && scsi_device_get(sdev))
1294 sdev = NULL;
1295 spin_unlock_irqrestore(shost->host_lock, flags);
1296
1297 return sdev;
1298}
1299EXPORT_SYMBOL(scsi_device_lookup_by_target);
1300
1301/**
eb44820c 1302 * __scsi_device_lookup - find a device given the host (UNLOCKED)
1da177e4
LT
1303 * @shost: SCSI host pointer
1304 * @channel: SCSI channel (zero if only one channel)
eb44820c 1305 * @id: SCSI target number (physical unit number)
1da177e4
LT
1306 * @lun: SCSI Logical Unit Number
1307 *
eb44820c
RL
1308 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1309 * for a given host. The returned scsi_device does not have an additional
1310 * reference. You must hold the host's host_lock over this call and any access
1311 * to the returned scsi_device.
1da177e4
LT
1312 *
1313 * Note: The only reason why drivers would want to use this is because
eb44820c 1314 * they need to access the device list in irq context. Otherwise you
1da177e4
LT
1315 * really want to use scsi_device_lookup instead.
1316 **/
1317struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
9cb78c16 1318 uint channel, uint id, u64 lun)
1da177e4
LT
1319{
1320 struct scsi_device *sdev;
1321
1322 list_for_each_entry(sdev, &shost->__devices, siblings) {
1323 if (sdev->channel == channel && sdev->id == id &&
1324 sdev->lun ==lun)
1325 return sdev;
1326 }
1327
1328 return NULL;
1329}
1330EXPORT_SYMBOL(__scsi_device_lookup);
1331
1332/**
1333 * scsi_device_lookup - find a device given the host
1334 * @shost: SCSI host pointer
1335 * @channel: SCSI channel (zero if only one channel)
1336 * @id: SCSI target number (physical unit number)
1337 * @lun: SCSI Logical Unit Number
1338 *
eb44820c
RL
1339 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1340 * for a given host. The returned scsi_device has an additional reference that
1341 * needs to be released with scsi_device_put once you're done with it.
1da177e4
LT
1342 **/
1343struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
9cb78c16 1344 uint channel, uint id, u64 lun)
1da177e4
LT
1345{
1346 struct scsi_device *sdev;
1347 unsigned long flags;
1348
1349 spin_lock_irqsave(shost->host_lock, flags);
1350 sdev = __scsi_device_lookup(shost, channel, id, lun);
1351 if (sdev && scsi_device_get(sdev))
1352 sdev = NULL;
1353 spin_unlock_irqrestore(shost->host_lock, flags);
1354
1355 return sdev;
1356}
1357EXPORT_SYMBOL(scsi_device_lookup);
1358
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

/* Runtime-tunable bit mask controlling SCSI midlayer log verbosity,
 * exposed under /sys/module/ (world-readable, root-writable). */
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
/*
 * Bring up the SCSI midlayer.  Each subsystem is initialized in order;
 * any failure unwinds the ones already set up, in reverse order, via
 * the goto ladder below before returning the error.
 */
static int __init init_scsi(void)
{
	int error;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	/* NOTE(review): scsi_netlink_init() is not checked or unwound
	 * here — presumably best-effort; confirm against its definition. */
	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

	/* Unwind in strict reverse order of initialization */
cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}
1407
/*
 * Tear down the SCSI midlayer, releasing subsystems in (mostly) the
 * reverse order of init_scsi().
 */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
	/* NOTE(review): scsi_sd_probe_domain is not registered in
	 * init_scsi(); it comes from elsewhere in the SCSI core —
	 * confirm this unregister pairs with that registration site. */
	async_unregister_domain(&scsi_sd_probe_domain);
}
1419
/* Initialize the SCSI core early in boot (subsys level, before
 * device_initcall-level drivers that depend on it). */
subsys_initcall(init_scsi);
module_exit(exit_scsi);