]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/scsi.c
scsi: add support for a blk-mq based I/O path.
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / scsi.c
CommitLineData
1da177e4
LT
1/*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 * Copyright (C) 2002, 2003 Christoph Hellwig
5 *
6 * generic mid-level SCSI driver
7 * Initial versions: Drew Eckhardt
8 * Subsequent revisions: Eric Youngdale
9 *
10 * <drew@colorado.edu>
11 *
12 * Bug correction thanks go to :
13 * Rik Faith <faith@cs.unc.edu>
14 * Tommy Thorn <tthorn>
15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 *
17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
18 * add scatter-gather, multiple outstanding request, and other
19 * enhancements.
20 *
21 * Native multichannel, wide scsi, /proc/scsi and hot plugging
22 * support added by Michael Neuffer <mike@i-connect.net>
23 *
24 * Added request_module("scsi_hostadapter") for kerneld:
25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
26 * Bjorn Ekwall <bj0rn@blox.se>
27 * (changed to kmod)
28 *
29 * Major improvements to the timeout, abort, and reset processing,
30 * as well as performance modifications for large queue depths by
31 * Leonard N. Zubkoff <lnz@dandelion.com>
32 *
33 * Converted cli() code to spinlocks, Ingo Molnar
34 *
35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 *
37 * out_of_space hacks, D. Gilbert (dpg) 990608
38 */
39
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42#include <linux/kernel.h>
1da177e4
LT
43#include <linux/timer.h>
44#include <linux/string.h>
45#include <linux/slab.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h>
48#include <linux/init.h>
49#include <linux/completion.h>
1da177e4
LT
50#include <linux/unistd.h>
51#include <linux/spinlock.h>
52#include <linux/kmod.h>
53#include <linux/interrupt.h>
54#include <linux/notifier.h>
55#include <linux/cpu.h>
0b950672 56#include <linux/mutex.h>
2955b47d 57#include <linux/async.h>
3c6bdaea 58#include <asm/unaligned.h>
1da177e4
LT
59
60#include <scsi/scsi.h>
61#include <scsi/scsi_cmnd.h>
62#include <scsi/scsi_dbg.h>
63#include <scsi/scsi_device.h>
7b3d9545 64#include <scsi/scsi_driver.h>
1da177e4
LT
65#include <scsi/scsi_eh.h>
66#include <scsi/scsi_host.h>
67#include <scsi/scsi_tcq.h>
1da177e4
LT
68
69#include "scsi_priv.h"
70#include "scsi_logging.h"
71
bf816235
KT
72#define CREATE_TRACE_POINTS
73#include <trace/events/scsi.h>
74
1da177e4
LT
75/*
76 * Definitions and constants.
77 */
78
1da177e4
LT
79/*
80 * Note - the initial logging level can be set here to log events at boot time.
81 * After the system is up, you may enable logging via the /proc interface.
82 */
83unsigned int scsi_logging_level;
84#if defined(CONFIG_SCSI_LOGGING)
85EXPORT_SYMBOL(scsi_logging_level);
86#endif
87
ea80dade 88/* sd, scsi core and power management need to coordinate flushing async actions */
2955b47d 89ASYNC_DOMAIN(scsi_sd_probe_domain);
a7a20d10 90EXPORT_SYMBOL(scsi_sd_probe_domain);
a7a20d10 91
3c31b52f
DW
92/*
93 * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
94 * asynchronous system resume operations. It is marked 'exclusive' to avoid
95 * being included in the async_synchronize_full() that is invoked by
96 * dpm_resume()
97 */
98ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
99EXPORT_SYMBOL(scsi_sd_pm_domain);
100
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
 * You may not alter any existing entry (although adding new ones is
 * encouraged once assigned by ANSI/INCITS T10).
 *
 * Every string is padded to exactly 17 characters so that /proc/scsi/scsi
 * output stays column-aligned; do not change the padding.
 */
static const char *const scsi_device_types[] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"ASC IT8          ",
	"ASC IT8          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
	"Optical card     ",
	"Bridge controller",
	"Object storage   ",
	"Automation/Drive ",
};
eb44820c
RL
127/**
128 * scsi_device_type - Return 17 char string indicating device type.
129 * @type: type number to look up
130 */
131
4ff36718
MW
132const char * scsi_device_type(unsigned type)
133{
134 if (type == 0x1e)
135 return "Well-known LUN ";
136 if (type == 0x1f)
137 return "No Device ";
80c6e3c0 138 if (type >= ARRAY_SIZE(scsi_device_types))
4ff36718
MW
139 return "Unknown ";
140 return scsi_device_types[type];
141}
142
143EXPORT_SYMBOL(scsi_device_type);
1da177e4 144
/*
 * Per-host command allocation pool: a slab cache for the scsi_cmnd
 * structure itself plus a separate slab cache for the sense buffer.
 * Reference-counted via @users and protected by host_cmd_pool_mutex.
 */
struct scsi_host_cmd_pool {
	struct kmem_cache	*cmd_slab;	/* cache for struct scsi_cmnd (+ driver private data) */
	struct kmem_cache	*sense_slab;	/* cache for SCSI_SENSE_BUFFERSIZE sense buffers */
	unsigned int		users;		/* number of hosts sharing this pool */
	char			*cmd_name;	/* slab name for cmd_slab (kasprintf'd for private pools) */
	char			*sense_name;	/* slab name for sense_slab */
	unsigned int		slab_flags;	/* flags passed to kmem_cache_create() */
	gfp_t			gfp_mask;	/* extra gfp bits OR'd into every allocation */
};

/* Default pool shared by all hosts without driver-private command data. */
static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.cmd_name	= "scsi_cmd_cache",
	.sense_name	= "scsi_sense_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

/* Variant for hosts restricted to ISA DMA-able memory. */
static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.cmd_name	= "scsi_cmd_cache(DMA)",
	.sense_name	= "scsi_sense_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

/* Serializes pool creation/teardown and the users refcount. */
static DEFINE_MUTEX(host_cmd_pool_mutex);
e507e30b 170/**
7c283341
CH
171 * scsi_host_free_command - internal function to release a command
172 * @shost: host to free the command for
e507e30b
JB
173 * @cmd: command to release
174 *
175 * the command must previously have been allocated by
7c283341 176 * scsi_host_alloc_command.
e507e30b
JB
177 */
178static void
7c283341 179scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
e507e30b 180{
7c283341
CH
181 struct scsi_host_cmd_pool *pool = shost->cmd_pool;
182
7027ad72
MP
183 if (cmd->prot_sdb)
184 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
e507e30b
JB
185 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
186 kmem_cache_free(pool->cmd_slab, cmd);
187}
188
7027ad72
MP
189/**
190 * scsi_host_alloc_command - internal function to allocate command
191 * @shost: SCSI host whose pool to allocate from
192 * @gfp_mask: mask for the allocation
193 *
194 * Returns a fully allocated command with sense buffer and protection
195 * data buffer (where applicable) or NULL on failure
196 */
197static struct scsi_cmnd *
198scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
199{
7c283341 200 struct scsi_host_cmd_pool *pool = shost->cmd_pool;
7027ad72
MP
201 struct scsi_cmnd *cmd;
202
7c283341 203 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
7027ad72 204 if (!cmd)
7c283341
CH
205 goto fail;
206
207 cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
208 gfp_mask | pool->gfp_mask);
209 if (!cmd->sense_buffer)
210 goto fail_free_cmd;
7027ad72
MP
211
212 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
213 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
7c283341
CH
214 if (!cmd->prot_sdb)
215 goto fail_free_sense;
7027ad72
MP
216 }
217
218 return cmd;
7c283341
CH
219
220fail_free_sense:
221 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
222fail_free_cmd:
223 kmem_cache_free(pool->cmd_slab, cmd);
224fail:
225 return NULL;
7027ad72
MP
226}
227
/**
 * __scsi_get_command - Allocate a struct scsi_cmnd
 * @shost: host to transmit command
 * @gfp_mask: allocation mask
 *
 * Description: allocate a struct scsi_cmd from host's slab, recycling from the
 * host's free_list if necessary.
 */
static struct scsi_cmnd *
__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);

	if (unlikely(!cmd)) {
		unsigned long flags;

		/*
		 * Allocation failed; fall back to the host's emergency
		 * reserve so forward progress (e.g. swap I/O) is possible
		 * even under memory pressure.
		 */
		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (likely(!list_empty(&shost->free_list))) {
			cmd = list_entry(shost->free_list.next,
					 struct scsi_cmnd, list);
			list_del_init(&cmd->list);
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);

		if (cmd) {
			void *buf, *prot;

			/*
			 * A recycled command may carry stale state; wipe it
			 * but preserve its sense buffer and protection
			 * descriptor, which stay owned by the command.
			 */
			buf = cmd->sense_buffer;
			prot = cmd->prot_sdb;

			memset(cmd, 0, sizeof(*cmd));

			cmd->sense_buffer = buf;
			cmd->prot_sdb = prot;
		}
	}

	return cmd;
}
267
/**
 * scsi_get_command - Allocate and setup a scsi command block
 * @dev: parent scsi device
 * @gfp_mask: allocator flags
 *
 * Returns: The allocated scsi command structure, or NULL on allocation
 * failure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
	unsigned long flags;

	if (unlikely(cmd == NULL))
		return NULL;

	cmd->device = dev;
	INIT_LIST_HEAD(&cmd->list);
	/* prepare the deferred-abort work used by error handling */
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	/* track the command on the device's list of outstanding commands */
	spin_lock_irqsave(&dev->list_lock, flags);
	list_add_tail(&cmd->list, &dev->cmd_list);
	spin_unlock_irqrestore(&dev->list_lock, flags);
	/* timestamp used later for timeout/retry accounting */
	cmd->jiffies_at_alloc = jiffies;
	return cmd;
}
1da177e4 292
/**
 * __scsi_put_command - Free a struct scsi_cmnd
 * @shost: dev->host
 * @cmd: Command to free
 *
 * If the host's emergency free list is empty, the command is parked there
 * instead of being released, so one reserved command is always available.
 */
static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	/* changes to free_list permitted only under the lock; the unlocked
	 * emptiness test is just a cheap first-pass filter */
	if (unlikely(list_empty(&shost->free_list))) {
		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (list_empty(&shost->free_list)) {
			list_add(&cmd->list, &shost->free_list);
			cmd = NULL;	/* ownership moved to the free list */
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);
	}

	if (likely(cmd != NULL))
		scsi_host_free_command(shost, cmd);
}
b58d9154 314
/**
 * scsi_put_command - Free a scsi command block
 * @cmd: command block to free
 *
 * Returns: Nothing.
 *
 * Notes: The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	unsigned long flags;

	/* serious error if the command hasn't come from a device list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock_irqrestore(&cmd->device->list_lock, flags);

	/* an abort must have been cancelled before the command is freed */
	BUG_ON(delayed_work_pending(&cmd->abort_work));

	__scsi_put_command(cmd->device->host, cmd);
}
1da177e4 337
89d9a567
CH
338static struct scsi_host_cmd_pool *
339scsi_find_host_cmd_pool(struct Scsi_Host *shost)
1da177e4 340{
89d9a567
CH
341 if (shost->hostt->cmd_size)
342 return shost->hostt->cmd_pool;
343 if (shost->unchecked_isa_dma)
344 return &scsi_cmd_dma_pool;
345 return &scsi_cmd_pool;
346}
347
348static void
349scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
350{
351 kfree(pool->sense_name);
352 kfree(pool->cmd_name);
353 kfree(pool);
354}
355
/*
 * scsi_alloc_host_cmd_pool - build a driver-private pool descriptor for a
 * host whose template declares extra per-command data (hostt->cmd_size).
 * The slab caches themselves are created later by scsi_get_host_cmd_pool().
 * Returns the new pool or NULL on allocation failure.
 */
static struct scsi_host_cmd_pool *
scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
{
	struct scsi_host_template *hostt = shost->hostt;
	struct scsi_host_cmd_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* slab names must be unique; derive them from the template name */
	pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name);
	pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name);
	if (!pool->cmd_name || !pool->sense_name) {
		scsi_free_host_cmd_pool(pool);
		return NULL;
	}

	pool->slab_flags = SLAB_HWCACHE_ALIGN;
	if (shost->unchecked_isa_dma) {
		pool->slab_flags |= SLAB_CACHE_DMA;
		pool->gfp_mask = __GFP_DMA;
	}
	return pool;
}
380
381static struct scsi_host_cmd_pool *
382scsi_get_host_cmd_pool(struct Scsi_Host *shost)
383{
384 struct scsi_host_template *hostt = shost->hostt;
1c353f7d 385 struct scsi_host_cmd_pool *retval = NULL, *pool;
89d9a567
CH
386 size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
387
1da177e4
LT
388 /*
389 * Select a command slab for this host and create it if not
eb44820c 390 * yet existent.
1da177e4 391 */
0b950672 392 mutex_lock(&host_cmd_pool_mutex);
89d9a567
CH
393 pool = scsi_find_host_cmd_pool(shost);
394 if (!pool) {
395 pool = scsi_alloc_host_cmd_pool(shost);
396 if (!pool)
397 goto out;
398 }
399
1da177e4 400 if (!pool->users) {
89d9a567 401 pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
5b7f1680
JB
402 pool->slab_flags, NULL);
403 if (!pool->cmd_slab)
89d9a567 404 goto out_free_pool;
5b7f1680
JB
405
406 pool->sense_slab = kmem_cache_create(pool->sense_name,
407 SCSI_SENSE_BUFFERSIZE, 0,
408 pool->slab_flags, NULL);
89d9a567
CH
409 if (!pool->sense_slab)
410 goto out_free_slab;
1da177e4
LT
411 }
412
413 pool->users++;
1c353f7d 414 retval = pool;
89d9a567 415out:
0b950672 416 mutex_unlock(&host_cmd_pool_mutex);
1c353f7d 417 return retval;
89d9a567
CH
418
419out_free_slab:
420 kmem_cache_destroy(pool->cmd_slab);
421out_free_pool:
422 if (hostt->cmd_size)
423 scsi_free_host_cmd_pool(pool);
424 goto out;
1c353f7d
JB
425}
426
89d9a567 427static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
1c353f7d 428{
89d9a567 429 struct scsi_host_template *hostt = shost->hostt;
1c353f7d 430 struct scsi_host_cmd_pool *pool;
1da177e4 431
1c353f7d 432 mutex_lock(&host_cmd_pool_mutex);
89d9a567
CH
433 pool = scsi_find_host_cmd_pool(shost);
434
1da177e4 435 /*
1c353f7d
JB
436 * This may happen if a driver has a mismatched get and put
437 * of the command pool; the driver should be implicated in
438 * the stack trace
1da177e4 439 */
1c353f7d 440 BUG_ON(pool->users == 0);
de25deb1 441
5b7f1680
JB
442 if (!--pool->users) {
443 kmem_cache_destroy(pool->cmd_slab);
444 kmem_cache_destroy(pool->sense_slab);
89d9a567
CH
445 if (hostt->cmd_size)
446 scsi_free_host_cmd_pool(pool);
5b7f1680 447 }
0b950672 448 mutex_unlock(&host_cmd_pool_mutex);
1c353f7d
JB
449}
450
/**
 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
 * @shost: host to allocate the freelist for.
 *
 * Description: The command freelist protects against system-wide out of memory
 * deadlock by preallocating one SCSI command structure for each host, so the
 * system can always write to a swap file on a device associated with that host.
 *
 * Returns: 0 on success, -ENOMEM if the pool or the reserved command could
 * not be allocated.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
	const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
	struct scsi_cmnd *cmd;

	spin_lock_init(&shost->free_list_lock);
	INIT_LIST_HEAD(&shost->free_list);

	shost->cmd_pool = scsi_get_host_cmd_pool(shost);
	if (!shost->cmd_pool)
		return -ENOMEM;

	/*
	 * Get one backup command for this host.
	 */
	cmd = scsi_host_alloc_command(shost, gfp_mask);
	if (!cmd) {
		/* undo the pool reference taken above */
		scsi_put_host_cmd_pool(shost);
		shost->cmd_pool = NULL;
		return -ENOMEM;
	}
	list_add(&cmd->list, &shost->free_list);
	return 0;
}
485
/**
 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
 * @shost: host whose freelist is going to be destroyed
 *
 * Frees every command parked on the free list, then drops the host's
 * reference on its command pool.
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
	/*
	 * If cmd_pool is NULL the free list was not initialized, so
	 * do not attempt to release resources.
	 */
	if (!shost->cmd_pool)
		return;

	while (!list_empty(&shost->free_list)) {
		struct scsi_cmnd *cmd;

		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
		list_del_init(&cmd->list);
		scsi_host_free_command(shost, cmd);
	}
	/* commands must all be freed before the pool reference is dropped */
	shost->cmd_pool = NULL;
	scsi_put_host_cmd_pool(shost);
}
509
#ifdef CONFIG_SCSI_LOGGING
/*
 * scsi_log_send - emit queue-time logging for a command, gated on the
 * ML QUEUE bits of the global scsi_logging_level.
 */
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			scmd_printk(KERN_INFO, cmd, "Send: ");
			if (level > 2)
				printk("0x%p ", cmd);
			printk("\n");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " queuecommand 0x%p\n",
					scsi_sglist(cmd), scsi_bufflen(cmd),
					cmd->device->host->hostt->queuecommand);

			}
		}
	}
}
545
/*
 * scsi_log_completion - emit completion-time logging for a command, gated
 * on the ML COMPLETE bits of the global scsi_logging_level.
 * @disposition: error-handler disposition (SUCCESS, NEEDS_RETRY, ...).
 */
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scmd_printk(KERN_INFO, cmd, "Done: ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS\n");
				break;
			case NEEDS_RETRY:
				printk("RETRY\n");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE\n");
				break;
			case FAILED:
				printk("FAILED\n");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT\n");
				break;
			default:
				printk("UNKNOWN\n");
			}
			scsi_print_result(cmd);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION)
				scsi_print_sense("", cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    atomic_read(&cmd->device->host->host_busy),
					    cmd->device->host->host_failed);
		}
	}
}
#endif
609
eb44820c
RL
610/**
611 * scsi_cmd_get_serial - Assign a serial number to a command
612 * @host: the scsi host
613 * @cmd: command to assign serial number to
614 *
615 * Description: a serial number identifies a request for error recovery
1da177e4
LT
616 * and debugging purposes. Protected by the Host_Lock of host.
617 */
f281233d 618void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1da177e4
LT
619{
620 cmd->serial_number = host->cmd_serial_number++;
621 if (cmd->serial_number == 0)
622 cmd->serial_number = host->cmd_serial_number++;
1da177e4 623}
f281233d 624EXPORT_SYMBOL(scsi_cmd_get_serial);
1da177e4 625
/**
 * scsi_dispatch_command - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero return request was rejected and device's queue needs to be
 * plugged.
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/*
	 * If SCSI-2 or lower, store the LUN value in cmnd.
	 */
	if (cmd->device->scsi_level <= SCSI_2 &&
	    cmd->device->scsi_level != SCSI_UNKNOWN) {
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);
	}

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		/* normalize unexpected rejection codes to HOST_BUSY */
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	/* complete the command immediately without reaching the LLD */
	cmd->scsi_done(cmd);
	return 0;
}
710
/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 * request, waking processes that are waiting on results,
 * etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags that say that the device/target/host is no longer
	 * capable of accepting new commands.
	 */
	if (atomic_read(&shost->host_blocked))
		atomic_set(&shost->host_blocked, 0);
	if (atomic_read(&starget->target_blocked))
		atomic_set(&starget->target_blocked, 0);
	if (atomic_read(&sdev->device_blocked))
		atomic_set(&sdev->device_blocked, 0);

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	good_bytes = scsi_bufflen(cmd);
	/* filesystem requests may be adjusted by the upper-level driver */
	if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * USB may not give sense identifying bad sector and
		 * simply return a residue instead, so subtract off the
		 * residue if drv->done() error processing indicates no
		 * change to the completion length.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}
1da177e4 768
/**
 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
 * @sdev: SCSI Device in question
 * @tagged: Do we use tagged queueing (non-0) or do we treat
 *          this device as an untagged device (0)
 * @tags: Number of tags allowed if tagged queueing enabled,
 *        or number of commands the low level driver can
 *        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:	Nothing
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		the right thing depending on whether or not the device is
 *		currently active and whether or not it even has the
 *		command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/*
	 * Check to see if the queue is managed by the block layer.
	 * If it is, and we fail to adjust the depth, exit.
	 *
	 * Do not resize the tag map if it is a host wide share bqt,
	 * because the size should be the hosts's can_queue. If there
	 * is more IO than the LLD's can_queue (so there are not enough
	 * tags) request_fn's host queue ready check will handle it.
	 */
	if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) {
		if (blk_queue_tagged(sdev->request_queue) &&
		    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
			goto out;
	}

	sdev->queue_depth = tags;
	switch (tagged) {
	case 0:
		/* untagged operation */
		sdev->ordered_tags = 0;
		sdev->simple_tags = 0;
		break;
	case MSG_ORDERED_TAG:
		sdev->ordered_tags = 1;
		sdev->simple_tags = 1;
		break;
	case MSG_SIMPLE_TAG:
		sdev->ordered_tags = 0;
		sdev->simple_tags = 1;
		break;
	default:
		/* unknown tag type: fall back to untagged and warn */
		sdev->ordered_tags = 0;
		sdev->simple_tags = 0;
		sdev_printk(KERN_WARNING, sdev,
			    "scsi_adjust_queue_depth, bad queue type, "
			    "disabled\n");
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
839
/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description: This function will track successive QUEUE_FULL events on a
 * specific SCSI device to determine if and when there is a
 * need to adjust the queue depth on the device.
 *
 * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
 * -1 - Drop back to untagged operation using host->cmd_per_lun
 * as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes: Low level drivers may call this at any time and we will do
 * "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{

	/*
	 * Don't let QUEUE_FULLs on the same
	 * jiffies count, they could all be from
	 * same event.
	 */
	if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
		return 0;

	sdev->last_queue_full_time = jiffies;
	if (sdev->last_queue_full_depth != depth) {
		/* depth changed: restart the event count at this depth */
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	/* only act after more than 10 QUEUE_FULLs at the same depth */
	if (sdev->last_queue_full_count <= 10)
		return 0;
	if (sdev->last_queue_full_depth < 8) {
		/* Drop back to untagged */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
893
/**
 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 * @sdev: The device to ask
 * @buffer: Where to put the result
 * @page: Which Vital Product Data to return
 * @len: The length of the buffer
 *
 * This is an internal helper function.  You probably want to use
 * scsi_get_vpd_page instead.
 *
 * Returns size of the vpd page on success or a negative error number.
 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
			    u8 page, unsigned len)
{
	int result;
	unsigned char cmd[16];

	/* a VPD page header alone is 4 bytes; anything less is useless */
	if (len < 4)
		return -EINVAL;

	cmd[0] = INQUIRY;
	cmd[1] = 1;		/* EVPD */
	cmd[2] = page;
	cmd[3] = len >> 8;
	cmd[4] = len & 0xff;
	cmd[5] = 0;		/* Control byte */

	/*
	 * I'm not convinced we need to try quite this hard to get VPD, but
	 * all the existing users tried this hard.
	 */
	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
				  len, NULL, 30 * HZ, 3, NULL);
	if (result)
		return -EIO;

	/* Sanity check that we got the page back that we asked for */
	if (buffer[1] != page)
		return -EIO;

	/* page length field plus the 4-byte header */
	return get_unaligned_be16(&buffer[2]) + 4;
}
937
/**
 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 * @buf: where to store the VPD
 * @buf_len: number of bytes in the VPD buffer area
 *
 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
 * The supported-pages page (0) is queried first to verify the device
 * advertises @page before the page itself is fetched into @buf.
 *
 * Returns 0 on success, -EINVAL if the page is unsupported or could not
 * be retrieved.
 */
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
		      int buf_len)
{
	int i, result;

	if (sdev->skip_vpd_pages)
		goto fail;

	/* Ask for all the pages supported by this device */
	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
	if (result < 4)
		goto fail;

	/* If the user actually wanted this page, we can skip the rest */
	if (page == 0)
		return 0;

	/* supported page codes start after the 4-byte header */
	for (i = 4; i < min(result, buf_len); i++)
		if (buf[i] == page)
			goto found;

	if (i < result && i >= buf_len)
		/* ran off the end of the buffer, give us benefit of doubt */
		goto found;
	/* The device claims it doesn't support the requested page */
	goto fail;

 found:
	result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
	if (result < 0)
		goto fail;

	return 0;

 fail:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
990
b3ae8780
HR
991/**
992 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
993 * @sdev: The device to ask
994 *
995 * Attach the 'Device Identification' VPD page (0x83) and the
996 * 'Unit Serial Number' VPD page (0x80) to a SCSI device
997 * structure. This information can be used to identify the device
998 * uniquely.
999 */
1000void scsi_attach_vpd(struct scsi_device *sdev)
1001{
1002 int result, i;
1003 int vpd_len = SCSI_VPD_PG_LEN;
1004 int pg80_supported = 0;
1005 int pg83_supported = 0;
1006 unsigned char *vpd_buf;
1007
1008 if (sdev->skip_vpd_pages)
1009 return;
1010retry_pg0:
1011 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1012 if (!vpd_buf)
1013 return;
1014
1015 /* Ask for all the pages supported by this device */
1016 result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
1017 if (result < 0) {
1018 kfree(vpd_buf);
1019 return;
1020 }
1021 if (result > vpd_len) {
1022 vpd_len = result;
1023 kfree(vpd_buf);
1024 goto retry_pg0;
1025 }
1026
1027 for (i = 4; i < result; i++) {
1028 if (vpd_buf[i] == 0x80)
1029 pg80_supported = 1;
1030 if (vpd_buf[i] == 0x83)
1031 pg83_supported = 1;
1032 }
1033 kfree(vpd_buf);
1034 vpd_len = SCSI_VPD_PG_LEN;
1035
1036 if (pg80_supported) {
1037retry_pg80:
1038 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1039 if (!vpd_buf)
1040 return;
1041
1042 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
1043 if (result < 0) {
1044 kfree(vpd_buf);
1045 return;
1046 }
1047 if (result > vpd_len) {
1048 vpd_len = result;
1049 kfree(vpd_buf);
1050 goto retry_pg80;
1051 }
1052 sdev->vpd_pg80_len = result;
1053 sdev->vpd_pg80 = vpd_buf;
1054 vpd_len = SCSI_VPD_PG_LEN;
1055 }
1056
1057 if (pg83_supported) {
1058retry_pg83:
1059 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1060 if (!vpd_buf)
1061 return;
1062
1063 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
1064 if (result < 0) {
1065 kfree(vpd_buf);
1066 return;
1067 }
1068 if (result > vpd_len) {
1069 vpd_len = result;
1070 kfree(vpd_buf);
1071 goto retry_pg83;
1072 }
1073 sdev->vpd_pg83_len = result;
1074 sdev->vpd_pg83 = vpd_buf;
1075 }
1076}
1077
3c6bdaea
MP
1078/**
1079 * scsi_report_opcode - Find out if a given command opcode is supported
1080 * @sdev: scsi device to query
1081 * @buffer: scratch buffer (must be at least 20 bytes long)
1082 * @len: length of buffer
1083 * @opcode: opcode for command to look up
1084 *
1085 * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
66c28f97
MP
1086 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
1087 * unsupported and 1 if the device claims to support the command.
3c6bdaea
MP
1088 */
1089int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
1090 unsigned int len, unsigned char opcode)
1091{
1092 unsigned char cmd[16];
1093 struct scsi_sense_hdr sshdr;
1094 int result;
1095
1096 if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
66c28f97 1097 return -EINVAL;
3c6bdaea
MP
1098
1099 memset(cmd, 0, 16);
1100 cmd[0] = MAINTENANCE_IN;
1101 cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
1102 cmd[2] = 1; /* One command format */
1103 cmd[3] = opcode;
1104 put_unaligned_be32(len, &cmd[6]);
1105 memset(buffer, 0, len);
1106
1107 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1108 &sshdr, 30 * HZ, 3, NULL);
1109
1110 if (result && scsi_sense_valid(&sshdr) &&
1111 sshdr.sense_key == ILLEGAL_REQUEST &&
1112 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
66c28f97 1113 return -EINVAL;
3c6bdaea
MP
1114
1115 if ((buffer[1] & 3) == 3) /* Command supported */
1116 return 1;
1117
1118 return 0;
1119}
1120EXPORT_SYMBOL(scsi_report_opcode);
1121
1da177e4 1122/**
eb44820c 1123 * scsi_device_get - get an additional reference to a scsi_device
1da177e4
LT
1124 * @sdev: device to get a reference to
1125 *
eb44820c 1126 * Description: Gets a reference to the scsi_device and increments the use count
1da177e4
LT
1127 * of the underlying LLDD module. You must hold host_lock of the
1128 * parent Scsi_Host or already have a reference when calling this.
1129 */
1130int scsi_device_get(struct scsi_device *sdev)
1131{
85b6c720 1132 if (sdev->sdev_state == SDEV_DEL)
1da177e4
LT
1133 return -ENXIO;
1134 if (!get_device(&sdev->sdev_gendev))
1135 return -ENXIO;
85b6c720
JB
1136 /* We can fail this if we're doing SCSI operations
1137 * from module exit (like cache flush) */
1138 try_module_get(sdev->host->hostt->module);
1139
1da177e4
LT
1140 return 0;
1141}
1142EXPORT_SYMBOL(scsi_device_get);
1143
/**
 * scsi_device_put - release a reference to a scsi_device
 * @sdev: device to release a reference on.
 *
 * Description: Release a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module. The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module *module = sdev->host->hostt->module;

	/* The module refcount will be zero if scsi_device_get()
	 * was called from a module removal routine; in that case
	 * try_module_get() did not take a reference, so dropping
	 * one here would underflow the count. */
	if (module && module_refcount(module) != 0)
		module_put(module);
#endif
	/* May free the scsi_device if this was the last reference. */
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
1165
/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	/*
	 * Walk forward from @prev (or the list head) under host_lock until
	 * a device we can pin with scsi_device_get() is found; devices we
	 * cannot reference (e.g. being deleted) are skipped.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* Drop the reference the previous iteration step took (if any). */
	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
1190
1191/**
1192 * starget_for_each_device - helper to walk all devices of a target
1193 * @starget: target whose devices we want to iterate over.
eb44820c
RL
1194 * @data: Opaque passed to each function call.
1195 * @fn: Function to call on each device
1da177e4 1196 *
522939d4 1197 * This traverses over each device of @starget. The devices have
1da177e4
LT
1198 * a reference that must be released by scsi_host_put when breaking
1199 * out of the loop.
1200 */
522939d4 1201void starget_for_each_device(struct scsi_target *starget, void *data,
1da177e4
LT
1202 void (*fn)(struct scsi_device *, void *))
1203{
1204 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1205 struct scsi_device *sdev;
1206
1207 shost_for_each_device(sdev, shost) {
1208 if ((sdev->channel == starget->channel) &&
1209 (sdev->id == starget->id))
1210 fn(sdev, data);
1211 }
1212}
1213EXPORT_SYMBOL(starget_for_each_device);
1214
522939d4 1215/**
14f501a4 1216 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
522939d4 1217 * @starget: target whose devices we want to iterate over.
14f501a4
RD
1218 * @data: parameter for callback @fn()
1219 * @fn: callback function that is invoked for each device
522939d4
MR
1220 *
1221 * This traverses over each device of @starget. It does _not_
1222 * take a reference on the scsi_device, so the whole loop must be
1223 * protected by shost->host_lock.
1224 *
1225 * Note: The only reason why drivers would want to use this is because
1226 * they need to access the device list in irq context. Otherwise you
1227 * really want to use starget_for_each_device instead.
1228 **/
1229void __starget_for_each_device(struct scsi_target *starget, void *data,
1230 void (*fn)(struct scsi_device *, void *))
1231{
1232 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1233 struct scsi_device *sdev;
1234
1235 __shost_for_each_device(sdev, shost) {
1236 if ((sdev->channel == starget->channel) &&
1237 (sdev->id == starget->id))
1238 fn(sdev, data);
1239 }
1240}
1241EXPORT_SYMBOL(__starget_for_each_device);
1242
1da177e4
LT
1243/**
1244 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
1245 * @starget: SCSI target pointer
1246 * @lun: SCSI Logical Unit Number
1247 *
eb44820c
RL
1248 * Description: Looks up the scsi_device with the specified @lun for a given
1249 * @starget. The returned scsi_device does not have an additional
1da177e4 1250 * reference. You must hold the host's host_lock over this call and
32aeef60
HR
1251 * any access to the returned scsi_device. A scsi_device in state
1252 * SDEV_DEL is skipped.
1da177e4 1253 *
dc8875e1 1254 * Note: The only reason why drivers should use this is because
eb44820c 1255 * they need to access the device list in irq context. Otherwise you
1da177e4
LT
1256 * really want to use scsi_device_lookup_by_target instead.
1257 **/
1258struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
9cb78c16 1259 u64 lun)
1da177e4
LT
1260{
1261 struct scsi_device *sdev;
1262
1263 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
32aeef60
HR
1264 if (sdev->sdev_state == SDEV_DEL)
1265 continue;
1da177e4
LT
1266 if (sdev->lun ==lun)
1267 return sdev;
1268 }
1269
1270 return NULL;
1271}
1272EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1273
1274/**
1275 * scsi_device_lookup_by_target - find a device given the target
1276 * @starget: SCSI target pointer
1277 * @lun: SCSI Logical Unit Number
1278 *
477e608c
BZ
1279 * Description: Looks up the scsi_device with the specified @lun for a given
1280 * @starget. The returned scsi_device has an additional reference that
eb44820c 1281 * needs to be released with scsi_device_put once you're done with it.
1da177e4
LT
1282 **/
1283struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
9cb78c16 1284 u64 lun)
1da177e4
LT
1285{
1286 struct scsi_device *sdev;
1287 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1288 unsigned long flags;
1289
1290 spin_lock_irqsave(shost->host_lock, flags);
1291 sdev = __scsi_device_lookup_by_target(starget, lun);
1292 if (sdev && scsi_device_get(sdev))
1293 sdev = NULL;
1294 spin_unlock_irqrestore(shost->host_lock, flags);
1295
1296 return sdev;
1297}
1298EXPORT_SYMBOL(scsi_device_lookup_by_target);
1299
1300/**
eb44820c 1301 * __scsi_device_lookup - find a device given the host (UNLOCKED)
1da177e4
LT
1302 * @shost: SCSI host pointer
1303 * @channel: SCSI channel (zero if only one channel)
eb44820c 1304 * @id: SCSI target number (physical unit number)
1da177e4
LT
1305 * @lun: SCSI Logical Unit Number
1306 *
eb44820c
RL
1307 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1308 * for a given host. The returned scsi_device does not have an additional
1309 * reference. You must hold the host's host_lock over this call and any access
1310 * to the returned scsi_device.
1da177e4
LT
1311 *
1312 * Note: The only reason why drivers would want to use this is because
eb44820c 1313 * they need to access the device list in irq context. Otherwise you
1da177e4
LT
1314 * really want to use scsi_device_lookup instead.
1315 **/
1316struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
9cb78c16 1317 uint channel, uint id, u64 lun)
1da177e4
LT
1318{
1319 struct scsi_device *sdev;
1320
1321 list_for_each_entry(sdev, &shost->__devices, siblings) {
1322 if (sdev->channel == channel && sdev->id == id &&
1323 sdev->lun ==lun)
1324 return sdev;
1325 }
1326
1327 return NULL;
1328}
1329EXPORT_SYMBOL(__scsi_device_lookup);
1330
1331/**
1332 * scsi_device_lookup - find a device given the host
1333 * @shost: SCSI host pointer
1334 * @channel: SCSI channel (zero if only one channel)
1335 * @id: SCSI target number (physical unit number)
1336 * @lun: SCSI Logical Unit Number
1337 *
eb44820c
RL
1338 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1339 * for a given host. The returned scsi_device has an additional reference that
1340 * needs to be released with scsi_device_put once you're done with it.
1da177e4
LT
1341 **/
1342struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
9cb78c16 1343 uint channel, uint id, u64 lun)
1da177e4
LT
1344{
1345 struct scsi_device *sdev;
1346 unsigned long flags;
1347
1348 spin_lock_irqsave(shost->host_lock, flags);
1349 sdev = __scsi_device_lookup(shost, channel, id, lun);
1350 if (sdev && scsi_device_get(sdev))
1351 sdev = NULL;
1352 spin_unlock_irqrestore(shost->host_lock, flags);
1353
1354 return sdev;
1355}
1356EXPORT_SYMBOL(scsi_device_lookup);
1357
1da177e4
LT
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

/* Tunable at runtime via /sys/module/scsi_mod/parameters/scsi_logging_level */
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

/* Opt-in switch for the blk-mq based I/O path (scsi_mod.use_blk_mq=Y). */
bool scsi_use_blk_mq = false;
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
1366
1da177e4
LT
/*
 * init_scsi - bring up the SCSI core subsystems in dependency order.
 * Each failure unwinds everything initialized before it via the cleanup
 * labels below (which run in reverse order of initialization).
 */
static int __init init_scsi(void)
{
	int error;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	/* Netlink init is best-effort: its result is deliberately ignored. */
	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}
1409
/* Tear the SCSI core down in roughly the reverse order of init_scsi(). */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
	/* NOTE(review): scsi_sd_probe_domain is registered outside this file
	 * (presumably by the sd driver/async probing code) — confirm the
	 * unregister here matches that registration. */
	async_unregister_domain(&scsi_sd_probe_domain);
}
1421
/* The core must be up before any SCSI LLDD registers, hence subsys_initcall
 * rather than module_init. */
subsys_initcall(init_scsi);
module_exit(exit_scsi);