/*
 *  scsi.c  Copyright (C) 1992 Drew Eckhardt
 *          Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *          Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Bug correction thanks go to :
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding request, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

static void scsi_done(struct scsi_cmnd *cmd);
static int scsi_retry_command(struct scsi_cmnd *cmd);

/*
 * Definitions and constants.
 */

#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)

/*
 * Macro to determine the size of SCSI command. This macro takes vendor
 * unique commands into account. SCSI commands in groups 6 and 7 are
 * vendor unique and we will depend upon the command length being
 * supplied correctly in cmd_len.
 */
#define CDB_SIZE(cmd)   (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
                                COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
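
/*
 * Example (illustrative only): the command group is the top three bits
 * of the opcode, so READ(10) (opcode 0x28) falls in group 1 and
 * CDB_SIZE() resolves it through COMMAND_SIZE() to a 10-byte CDB,
 * while a vendor-specific group 6/7 opcode such as 0xC0 is sized from
 * whatever cmd_len the caller supplied.
 */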

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
        "Direct-Access    ",
        "Sequential-Access",
        "Printer          ",
        "Processor        ",
        "WORM             ",
        "CD-ROM           ",
        "Scanner          ",
        "Optical Device   ",
        "Medium Changer   ",
        "Communications   ",
        "Unknown          ",
        "Unknown          ",
        "RAID             ",
        "Enclosure        ",
        "Direct-Access-RBC",
};
EXPORT_SYMBOL(scsi_device_types);

/*
 * Function:    scsi_allocate_request
 *
 * Purpose:     Allocate a request descriptor.
 *
 * Arguments:   sdev     - device for which we want a request
 *              gfp_mask - allocation flags passed to kmalloc
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 *
 * Returns:     Pointer to request block.
 */
struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
                                           gfp_t gfp_mask)
{
        const int offset = ALIGN(sizeof(struct scsi_request), 4);
        const int size = offset + sizeof(struct request);
        struct scsi_request *sreq;

        sreq = kmalloc(size, gfp_mask);
        if (likely(sreq != NULL)) {
                memset(sreq, 0, size);
                sreq->sr_request = (struct request *)(((char *)sreq) + offset);
                sreq->sr_device = sdev;
                sreq->sr_host = sdev->host;
                sreq->sr_magic = SCSI_REQ_MAGIC;
                sreq->sr_data_direction = DMA_BIDIRECTIONAL;
        }

        return sreq;
}
EXPORT_SYMBOL(scsi_allocate_request);

void __scsi_release_request(struct scsi_request *sreq)
{
        struct request *req = sreq->sr_request;

        /* unlikely because the tag was usually ended earlier by the
         * mid-layer. However, for layering reasons ULD's don't end
         * the tag of commands they generate. */
        if (unlikely(blk_rq_tagged(req))) {
                unsigned long flags;
                struct request_queue *q = req->q;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_queue_end_tag(q, req);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        if (likely(sreq->sr_command != NULL)) {
                struct scsi_cmnd *cmd = sreq->sr_command;

                sreq->sr_command = NULL;
                scsi_next_command(cmd);
        }
}

/*
 * Function:    scsi_release_request
 *
 * Purpose:     Release a request descriptor.
 *
 * Arguments:   sreq    - request to release
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 */
void scsi_release_request(struct scsi_request *sreq)
{
        __scsi_release_request(sreq);
        kfree(sreq);
}
EXPORT_SYMBOL(scsi_release_request);
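
/*
 * Example: a minimal sketch of how a ULD might drive the scsi_request
 * interface end to end, assuming the era's synchronous helper
 * scsi_wait_req() from <scsi/scsi_request.h>; the TEST UNIT READY
 * opcode and the 30-second/3-retry values are illustrative only:
 *
 *      struct scsi_request *sreq;
 *      unsigned char cmnd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };
 *
 *      sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *      if (!sreq)
 *              return -ENOMEM;
 *      sreq->sr_data_direction = DMA_NONE;
 *      scsi_wait_req(sreq, cmnd, NULL, 0, 30 * HZ, 3);
 *      ... inspect sreq->sr_result and sreq->sr_sense_buffer ...
 *      scsi_release_request(sreq);
 */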

struct scsi_host_cmd_pool {
        kmem_cache_t    *slab;
        unsigned int    users;
        char            *name;
        unsigned int    slab_flags;
        gfp_t           gfp_mask;
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
        .name           = "scsi_cmd_cache",
        .slab_flags     = SLAB_HWCACHE_ALIGN,
};

static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
        .name           = "scsi_cmd_cache(DMA)",
        .slab_flags     = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
        .gfp_mask       = __GFP_DMA,
};

static DECLARE_MUTEX(host_cmd_pool_mutex);

static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
                                            gfp_t gfp_mask)
{
        struct scsi_cmnd *cmd;

        cmd = kmem_cache_alloc(shost->cmd_pool->slab,
                               gfp_mask | shost->cmd_pool->gfp_mask);

        if (unlikely(!cmd)) {
                unsigned long flags;

                spin_lock_irqsave(&shost->free_list_lock, flags);
                if (likely(!list_empty(&shost->free_list))) {
                        cmd = list_entry(shost->free_list.next,
                                         struct scsi_cmnd, list);
                        list_del_init(&cmd->list);
                }
                spin_unlock_irqrestore(&shost->free_list_lock, flags);
        }

        return cmd;
}

/*
 * Function:    scsi_get_command()
 *
 * Purpose:     Allocate and setup a scsi command block
 *
 * Arguments:   dev      - parent scsi device
 *              gfp_mask - allocator flags
 *
 * Returns:     The allocated scsi command structure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
        struct scsi_cmnd *cmd;

        /* Bail if we can't get a reference to the device */
        if (!get_device(&dev->sdev_gendev))
                return NULL;

        cmd = __scsi_get_command(dev->host, gfp_mask);

        if (likely(cmd != NULL)) {
                unsigned long flags;

                memset(cmd, 0, sizeof(*cmd));
                cmd->device = dev;
                init_timer(&cmd->eh_timeout);
                INIT_LIST_HEAD(&cmd->list);
                spin_lock_irqsave(&dev->list_lock, flags);
                list_add_tail(&cmd->list, &dev->cmd_list);
                spin_unlock_irqrestore(&dev->list_lock, flags);
                /*
                 * Stamp the allocation time inside the success branch so
                 * a failed allocation cannot dereference a NULL cmd.
                 */
                cmd->jiffies_at_alloc = jiffies;
        } else
                put_device(&dev->sdev_gendev);

        return cmd;
}
EXPORT_SYMBOL(scsi_get_command);

/*
 * Function:    scsi_put_command()
 *
 * Purpose:     Free a scsi command block
 *
 * Arguments:   cmd     - command block to free
 *
 * Returns:     Nothing.
 *
 * Notes:       The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        /* serious error if the command hasn't come from a device list */
        spin_lock_irqsave(&cmd->device->list_lock, flags);
        BUG_ON(list_empty(&cmd->list));
        list_del_init(&cmd->list);
        spin_unlock(&cmd->device->list_lock);
        /* changing locks here, don't need to restore the irq state */
        spin_lock(&shost->free_list_lock);
        if (unlikely(list_empty(&shost->free_list))) {
                list_add(&cmd->list, &shost->free_list);
                cmd = NULL;
        }
        spin_unlock_irqrestore(&shost->free_list_lock, flags);

        if (likely(cmd != NULL))
                kmem_cache_free(shost->cmd_pool->slab, cmd);

        put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
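
/*
 * Example: a sketch of the expected scsi_get_command()/scsi_put_command()
 * pairing (the GFP_ATOMIC flag and the elided setup are illustrative):
 *
 *      struct scsi_cmnd *cmd;
 *
 *      cmd = scsi_get_command(sdev, GFP_ATOMIC);
 *      if (!cmd)
 *              return -ENOMEM;
 *      ... fill in cmd->cmnd, cmd->sc_data_direction, etc., then hand
 *      ... the command off; once it is no longer needed:
 *      scsi_put_command(cmd);
 */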

/*
 * Function:    scsi_setup_command_freelist()
 *
 * Purpose:     Setup the command freelist for a scsi host.
 *
 * Arguments:   shost   - host to allocate the freelist for.
 *
 * Returns:     Nothing.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
        struct scsi_host_cmd_pool *pool;
        struct scsi_cmnd *cmd;

        spin_lock_init(&shost->free_list_lock);
        INIT_LIST_HEAD(&shost->free_list);

        /*
         * Select a command slab for this host and create it if it does
         * not yet exist.
         */
        down(&host_cmd_pool_mutex);
        pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
        if (!pool->users) {
                pool->slab = kmem_cache_create(pool->name,
                                sizeof(struct scsi_cmnd), 0,
                                pool->slab_flags, NULL, NULL);
                if (!pool->slab)
                        goto fail;
        }

        pool->users++;
        shost->cmd_pool = pool;
        up(&host_cmd_pool_mutex);

        /*
         * Get one backup command for this host.
         */
        cmd = kmem_cache_alloc(shost->cmd_pool->slab,
                               GFP_KERNEL | shost->cmd_pool->gfp_mask);
        if (!cmd)
                goto fail2;
        list_add(&cmd->list, &shost->free_list);
        return 0;

 fail2:
        /* pool->users is protected by host_cmd_pool_mutex */
        down(&host_cmd_pool_mutex);
        if (!--pool->users)
                kmem_cache_destroy(pool->slab);
        up(&host_cmd_pool_mutex);
        return -ENOMEM;
 fail:
        up(&host_cmd_pool_mutex);
        return -ENOMEM;
}

/*
 * Function:    scsi_destroy_command_freelist()
 *
 * Purpose:     Release the command freelist for a scsi host.
 *
 * Arguments:   shost   - host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
        while (!list_empty(&shost->free_list)) {
                struct scsi_cmnd *cmd;

                cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
                list_del_init(&cmd->list);
                kmem_cache_free(shost->cmd_pool->slab, cmd);
        }

        down(&host_cmd_pool_mutex);
        if (!--shost->cmd_pool->users)
                kmem_cache_destroy(shost->cmd_pool->slab);
        up(&host_cmd_pool_mutex);
}

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
        unsigned int level;
        struct scsi_device *sdev;

        /*
         * If ML QUEUE log level is greater than or equal to:
         *
         * 1: nothing (match completion)
         *
         * 2: log opcode + command of all commands
         *
         * 3: same as 2 plus dump cmd address
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
                                       SCSI_LOG_MLQUEUE_BITS);
                if (level > 1) {
                        sdev = cmd->device;
                        sdev_printk(KERN_INFO, sdev, "send ");
                        if (level > 2)
                                printk("0x%p ", cmd);
                        /*
                         * spaces to match disposition and cmd->result
                         * output in scsi_log_completion.
                         */
                        printk(" ");
                        scsi_print_command(cmd);
                        if (level > 3) {
                                printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
                                       " done = 0x%p, queuecommand 0x%p\n",
                                        cmd->buffer, cmd->bufflen,
                                        cmd->done,
                                        sdev->host->hostt->queuecommand);
                        }
                }
        }
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
        unsigned int level;
        struct scsi_device *sdev;

        /*
         * If ML COMPLETE log level is greater than or equal to:
         *
         * 1: log disposition, result, opcode + command, and conditionally
         * sense data for failures or non SUCCESS dispositions.
         *
         * 2: same as 1 but for all command completions.
         *
         * 3: same as 2 plus dump cmd address
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
                                       SCSI_LOG_MLCOMPLETE_BITS);
                if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
                    (level > 1)) {
                        sdev = cmd->device;
                        sdev_printk(KERN_INFO, sdev, "done ");
                        if (level > 2)
                                printk("0x%p ", cmd);
                        /*
                         * Dump truncated values, so we usually fit within
                         * 80 chars.
                         */
                        switch (disposition) {
                        case SUCCESS:
                                printk("SUCCESS");
                                break;
                        case NEEDS_RETRY:
                                printk("RETRY  ");
                                break;
                        case ADD_TO_MLQUEUE:
                                printk("MLQUEUE");
                                break;
                        case FAILED:
                                printk("FAILED ");
                                break;
                        case TIMEOUT_ERROR:
                                /*
                                 * If called via scsi_times_out.
                                 */
                                printk("TIMEOUT");
                                break;
                        default:
                                printk("UNKNOWN");
                        }
                        printk(" %8x ", cmd->result);
                        scsi_print_command(cmd);
                        if (status_byte(cmd->result) & CHECK_CONDITION) {
                                /*
                                 * XXX The scsi_print_sense formatting/prefix
                                 * doesn't match this function.
                                 */
                                scsi_print_sense("", cmd);
                        }
                        if (level > 3) {
                                printk(KERN_INFO "scsi host busy %d failed %d\n",
                                       sdev->host->host_busy,
                                       sdev->host->host_failed);
                        }
                }
        }
}
#endif

/*
 * Assign a serial number and pid to the request for error recovery
 * and debugging purposes.  Protected by the Host_Lock of host.
 * Zero is skipped because it is reserved to mean "no command
 * outstanding" (see __scsi_done, which resets the serial number to 0).
 */
static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
        cmd->serial_number = host->cmd_serial_number++;
        if (cmd->serial_number == 0)
                cmd->serial_number = host->cmd_serial_number++;

        cmd->pid = host->cmd_pid++;
        if (cmd->pid == 0)
                cmd->pid = host->cmd_pid++;
}

/*
 * Function:    scsi_dispatch_cmd
 *
 * Purpose:     Dispatch a command to the low-level driver.
 *
 * Arguments:   cmd - command block we are dispatching.
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        unsigned long flags = 0;
        unsigned long timeout;
        int rtn = 0;

        /* check if the device is still usable */
        if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
                /* in SDEV_DEL we error all commands. DID_NO_CONNECT
                 * returns an immediate error upwards, and signals
                 * that the device is no longer present */
                cmd->result = DID_NO_CONNECT << 16;
                atomic_inc(&cmd->device->iorequest_cnt);
                __scsi_done(cmd);
                /* return 0 (because the command has been processed) */
                goto out;
        }

        /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
        if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
                /*
                 * in SDEV_BLOCK, the command is just put back on the device
                 * queue.  The suspend state has already blocked the queue so
                 * future requests should not occur until the device
                 * transitions out of the suspend state.
                 */
                scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

                SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked\n"));

                /*
                 * NOTE: rtn is still zero here because we don't need the
                 * queue to be plugged on return (it's already stopped)
                 */
                goto out;
        }

        /*
         * If SCSI-2 or lower, store the LUN value in cmnd.
         */
        if (cmd->device->scsi_level <= SCSI_2) {
                cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
                               (cmd->device->lun << 5 & 0xe0);
        }

        /*
         * We will wait MIN_RESET_DELAY clock ticks after the last reset so
         * we can avoid the drive not being ready.
         */
        timeout = host->last_reset + MIN_RESET_DELAY;

        if (host->resetting && time_before(jiffies, timeout)) {
                int ticks_remaining = timeout - jiffies;
                /*
                 * NOTE: This may be executed from within an interrupt
                 * handler!  This is bad, but for now, it'll do.  The irq
                 * level of the interrupt handler has been masked out by the
                 * platform dependent interrupt handling code already, so the
                 * sti() here will not cause another call to the SCSI host's
                 * interrupt handler (assuming there is one irq-level per
                 * host).
                 */
                while (--ticks_remaining >= 0)
                        mdelay(1 + 999 / HZ);
                host->resetting = 0;
        }

        /*
         * AK: unlikely race here: for some reason the timer could
         * expire before the serial number is set up below.
         */
        scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

        scsi_log_send(cmd);

        /*
         * We will use a queued command if possible, otherwise we will
         * emulate the queuing and calling of completion function ourselves.
         */
        atomic_inc(&cmd->device->iorequest_cnt);

        /*
         * Before we queue this command, check if the command
         * length exceeds what the host adapter can handle.
         */
        if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
                SCSI_LOG_MLQUEUE(3,
                                printk("queuecommand : command too long.\n"));
                cmd->result = (DID_ABORT << 16);

                scsi_done(cmd);
                goto out;
        }

        spin_lock_irqsave(host->host_lock, flags);
        scsi_cmd_get_serial(host, cmd);

        if (unlikely(host->shost_state == SHOST_DEL)) {
                cmd->result = (DID_NO_CONNECT << 16);
                scsi_done(cmd);
        } else {
                rtn = host->hostt->queuecommand(cmd, scsi_done);
        }
        spin_unlock_irqrestore(host->host_lock, flags);
        if (rtn) {
                if (scsi_delete_timer(cmd)) {
                        atomic_inc(&cmd->device->iodone_cnt);
                        scsi_queue_insert(cmd,
                                          (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
                                          rtn : SCSI_MLQUEUE_HOST_BUSY);
                }
                SCSI_LOG_MLQUEUE(3,
                                 printk("queuecommand : request rejected\n"));
        }

 out:
        SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmd()\n"));
        return rtn;
}

/*
 * Function:    scsi_init_cmd_from_req
 *
 * Purpose:     Initialize a struct scsi_cmnd from a struct scsi_request
 *
 * Arguments:   cmd     - command descriptor.
 *              sreq    - Request from the queue.
 *
 * Lock status: None needed.
 *
 * Returns:     Nothing.
 *
 * Notes:       Mainly transfer data from the request structure to the
 *              command structure.  The request structure is allocated
 *              using the normal memory allocator, and requests can pile
 *              up to more or less any depth.  The command structure represents
 *              a consumable resource, as these are allocated into a pool
 *              when the SCSI subsystem initializes.  The preallocation is
 *              required so that in low-memory situations a disk I/O request
 *              won't cause the memory manager to try and write out a page.
 *              The request structure is generally used by ioctls and character
 *              devices.
 */
void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
{
        sreq->sr_command = cmd;

        cmd->cmd_len = sreq->sr_cmd_len;
        cmd->use_sg = sreq->sr_use_sg;

        cmd->request = sreq->sr_request;
        memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd));
        cmd->serial_number = 0;
        cmd->bufflen = sreq->sr_bufflen;
        cmd->buffer = sreq->sr_buffer;
        cmd->retries = 0;
        cmd->allowed = sreq->sr_allowed;
        cmd->done = sreq->sr_done;
        cmd->timeout_per_command = sreq->sr_timeout_per_command;
        cmd->sc_data_direction = sreq->sr_data_direction;
        cmd->sglist_len = sreq->sr_sglist_len;
        cmd->underflow = sreq->sr_underflow;
        cmd->sc_request = sreq;
        memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd));

        /*
         * Zero the sense buffer.  Some host adapters automatically request
         * sense on error.  0 is not a valid sense code.
         */
        memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
        cmd->request_buffer = sreq->sr_buffer;
        cmd->request_bufflen = sreq->sr_bufflen;
        cmd->old_use_sg = cmd->use_sg;
        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;

        /*
         * Start with a clean result; the command has not been run yet.
         */
        cmd->result = 0;

        SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
}

/*
 * Per-CPU I/O completion queue.
 */
static DEFINE_PER_CPU(struct list_head, scsi_done_q);

/**
 * scsi_done - Enqueue the finished SCSI command into the done queue.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * This function is the mid-level's (SCSI Core) interrupt routine, which
 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
 * the command to the done queue for further processing.
 *
 * This is the producer side of the done queue: commands are enqueued at
 * the tail.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
        /*
         * We don't have to worry about this one timing out any more.
         * If we are unable to remove the timer, then the command
         * has already timed out.  In which case, we have no choice but to
         * let the timeout function run, as we have no idea where in fact
         * that function could really be.  It might be on another processor,
         * etc, etc.
         */
        if (!scsi_delete_timer(cmd))
                return;
        __scsi_done(cmd);
}

/* Private entry to scsi_done() to complete a command when the timer
 * isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
        unsigned long flags;

        /*
         * Set the serial numbers back to zero
         */
        cmd->serial_number = 0;

        atomic_inc(&cmd->device->iodone_cnt);
        if (cmd->result)
                atomic_inc(&cmd->device->ioerr_cnt);

        /*
         * Next, enqueue the command into the done queue.
         * It is a per-CPU queue, so we just disable local interrupts
         * and need no spinlock.
         */
        local_irq_save(flags);
        list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));
        raise_softirq_irqoff(SCSI_SOFTIRQ);
        local_irq_restore(flags);
}

/**
 * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
 *
 * This is the consumer of the done queue.
 *
 * This is called with all interrupts enabled.  This should reduce
 * interrupt latency, stack depth, and reentrancy of the low-level
 * drivers.
 */
static void scsi_softirq(struct softirq_action *h)
{
        int disposition;
        LIST_HEAD(local_q);

        local_irq_disable();
        list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);
        local_irq_enable();

        while (!list_empty(&local_q)) {
                struct scsi_cmnd *cmd = list_entry(local_q.next,
                                                   struct scsi_cmnd, eh_entry);
                /* The longest time any command should be outstanding is the
                 * per command timeout multiplied by the number of retries.
                 *
                 * For a typical command, this is 2.5 minutes */
                unsigned long wait_for
                        = cmd->allowed * cmd->timeout_per_command;
                list_del_init(&cmd->eh_entry);

                disposition = scsi_decide_disposition(cmd);
                if (disposition != SUCCESS &&
                    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
                        sdev_printk(KERN_ERR, cmd->device,
                                    "timing out command, waited %lus\n",
                                    wait_for/HZ);
                        disposition = SUCCESS;
                }

                scsi_log_completion(cmd, disposition);
                switch (disposition) {
                case SUCCESS:
                        scsi_finish_command(cmd);
                        break;
                case NEEDS_RETRY:
                        scsi_retry_command(cmd);
                        break;
                case ADD_TO_MLQUEUE:
                        scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
                        break;
                default:
                        if (!scsi_eh_scmd_add(cmd, 0))
                                scsi_finish_command(cmd);
                }
        }
}

/*
 * Function:    scsi_retry_command
 *
 * Purpose:     Send a command back to the low level to be retried.
 *
 * Notes:       This command is always executed in the context of the
 *              bottom half handler, or the error handler thread. Low
 *              level drivers should not become re-entrant as a result of
 *              this.
 */
static int scsi_retry_command(struct scsi_cmnd *cmd)
{
        /*
         * Restore the SCSI command state.
         */
        scsi_setup_cmd_retry(cmd);

        /*
         * Zero the sense information from the last time we tried
         * this command.
         */
        memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

        return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
}

/*
 * Function:    scsi_finish_command
 *
 * Purpose:     Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct Scsi_Host *shost = sdev->host;
        struct scsi_request *sreq;

        scsi_device_unbusy(sdev);

        /*
         * Clear the flags which say that the device/host is no longer
         * capable of accepting new commands.  These are set in scsi_queue.c
         * for both the queue full condition on a device, and for a
         * host full condition on the host.
         *
         * XXX(hch): What about locking?
         */
        shost->host_blocked = 0;
        sdev->device_blocked = 0;

        /*
         * If we have valid sense information, then some kind of recovery
         * must have taken place.  Make a note of this.
         */
        if (SCSI_SENSE_VALID(cmd))
                cmd->result |= (DRIVER_SENSE << 24);

        SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
                                "Notifying upper driver of completion "
                                "(result %x)\n", cmd->result));

        /*
         * We can get here with use_sg=0, causing a panic in the upper level
         */
        cmd->use_sg = cmd->old_use_sg;

        /*
         * If there is an associated request structure, copy the data over
         * before we call the completion function.
         */
        sreq = cmd->sc_request;
        if (sreq) {
                sreq->sr_result = sreq->sr_command->result;
                if (sreq->sr_result) {
                        memcpy(sreq->sr_sense_buffer,
                               sreq->sr_command->sense_buffer,
                               sizeof(sreq->sr_sense_buffer));
                }
        }

        cmd->done(cmd);
}
EXPORT_SYMBOL(scsi_finish_command);

/*
 * Function:    scsi_adjust_queue_depth()
 *
 * Purpose:     Allow low level drivers to tell us to change the queue depth
 *              on a specific SCSI device
 *
 * Arguments:   sdev    - SCSI Device in question
 *              tagged  - Do we use tagged queueing (non-0) or do we treat
 *                        this device as an untagged device (0)
 *              tags    - Number of tags allowed if tagged queueing enabled,
 *                        or number of commands the low level driver can
 *                        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:     Nothing
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              the right thing depending on whether or not the device is
 *              currently active and whether or not it even has the
 *              command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
        unsigned long flags;

        /*
         * refuse to set tagged depth to an unworkable size
         */
        if (tags <= 0)
                return;

        spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

        /* Check to see if the queue is managed by the block layer
         * if it is, and we fail to adjust the depth, exit */
        if (blk_queue_tagged(sdev->request_queue) &&
            blk_queue_resize_tags(sdev->request_queue, tags) != 0)
                goto out;

        sdev->queue_depth = tags;
        switch (tagged) {
        case MSG_ORDERED_TAG:
                sdev->ordered_tags = 1;
                sdev->simple_tags = 1;
                break;
        case MSG_SIMPLE_TAG:
                sdev->ordered_tags = 0;
                sdev->simple_tags = 1;
                break;
        default:
                sdev_printk(KERN_WARNING, sdev,
                            "scsi_adjust_queue_depth, bad queue type, "
                            "disabled\n");
                /* fall through: disable tagged queueing entirely */
        case 0:
                sdev->ordered_tags = sdev->simple_tags = 0;
                sdev->queue_depth = tags;
                break;
        }
 out:
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
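
/*
 * Example: a sketch of how a LLDD might call this from its
 * ->slave_configure() hook (the depth of 64 and the function name are
 * illustrative, not a recommendation):
 *
 *      static int example_slave_configure(struct scsi_device *sdev)
 *      {
 *              if (sdev->tagged_supported)
 *                      scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 64);
 *              else
 *                      scsi_adjust_queue_depth(sdev, 0,
 *                                              sdev->host->cmd_per_lun);
 *              return 0;
 *      }
 */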

/*
 * Function:    scsi_track_queue_full()
 *
 * Purpose:     This function will track successive QUEUE_FULL events on a
 *              specific SCSI device to determine if and when there is a
 *              need to adjust the queue depth on the device.
 *
 * Arguments:   sdev    - SCSI Device in question
 *              depth   - Current number of outstanding SCSI commands on
 *                        this device, not counting the one returned as
 *                        QUEUE_FULL.
 *
 * Returns:     0 - No change needed
 *              >0 - Adjust queue depth to this new depth
 *              -1 - Drop back to untagged operation using host->cmd_per_lun
 *                   as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
        if ((jiffies >> 4) == sdev->last_queue_full_time)
                return 0;

        sdev->last_queue_full_time = (jiffies >> 4);
        if (sdev->last_queue_full_depth != depth) {
                sdev->last_queue_full_count = 1;
                sdev->last_queue_full_depth = depth;
        } else {
                sdev->last_queue_full_count++;
        }

        if (sdev->last_queue_full_count <= 10)
                return 0;
        if (sdev->last_queue_full_depth < 8) {
                /* Drop back to untagged */
                scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
                return -1;
        }

        if (sdev->ordered_tags)
                scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
        else
                scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
        return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
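
/*
 * Example: a sketch of QUEUE_FULL handling in a LLDD completion path;
 * "active" stands for the driver's own count of commands still
 * outstanding on the device, not counting the one that just failed:
 *
 *      if (status_byte(cmd->result) == QUEUE_FULL)
 *              scsi_track_queue_full(cmd->device, active);
 */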

/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:       device to get a reference to
 *
 * Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 */
int scsi_device_get(struct scsi_device *sdev)
{
        if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
                return -ENXIO;
        if (!get_device(&sdev->sdev_gendev))
                return -ENXIO;
        if (!try_module_get(sdev->host->hostt->module)) {
                put_device(&sdev->sdev_gendev);
                return -ENXIO;
        }
        return 0;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:       device to release a reference on.
 *
 * Releases a reference to the scsi_device and decrements the use count
 * of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
        module_put(sdev->host->hostt->module);
        put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
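
/*
 * Example: a sketch of taking a temporary reference (the caller is
 * assumed to already satisfy the host_lock/reference precondition
 * documented above):
 *
 *      if (scsi_device_get(sdev) == 0) {
 *              ... safely use sdev outside the host_lock ...
 *              scsi_device_put(sdev);
 *      }
 */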

/* helper for shost_for_each_device, thus not documented */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
                                           struct scsi_device *prev)
{
        struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
        struct scsi_device *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        while (list->next != &shost->__devices) {
                next = list_entry(list->next, struct scsi_device, siblings);
                /* skip devices that we can't get a reference to */
                if (!scsi_device_get(next))
                        break;
                next = NULL;
                list = list->next;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        if (prev)
                scsi_device_put(prev);
        return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
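
/*
 * Example: a sketch of iterating a host's devices through the
 * shost_for_each_device() wrapper built on this helper; references are
 * taken and dropped automatically, and example_visit() is a
 * hypothetical callback:
 *
 *      struct scsi_device *sdev;
 *
 *      shost_for_each_device(sdev, shost)
 *              example_visit(sdev);
 */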

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:    target whose devices we want to iterate over.
 * @data:       opaque pointer passed through to @fn.
 * @fn:         callback invoked for each matching device.
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
                             void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(starget_for_each_device);
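
/*
 * Example: a sketch of a per-device callback handed to
 * starget_for_each_device() (example_quiesce_one() is hypothetical):
 *
 *      static void example_quiesce_one(struct scsi_device *sdev, void *data)
 *      {
 *              scsi_device_quiesce(sdev);
 *      }
 *
 *      starget_for_each_device(starget, NULL, example_quiesce_one);
 */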

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:    SCSI target pointer
 * @lun:        SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
                                                   uint lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
                if (sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:    SCSI target pointer
 * @lun:        SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
                                                 uint lun)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup_by_target(starget, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:      SCSI host pointer
 * @channel:    SCSI channel (zero if only one channel)
 * @id:         SCSI target number (physical unit number)
 * @lun:        SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device does not have an additional reference.
 * You must hold the host's host_lock over this call and any access to the
 * returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
                                         uint channel, uint id, uint lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->channel == channel && sdev->id == id &&
                    sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:      SCSI host pointer
 * @channel:    SCSI channel (zero if only one channel)
 * @id:         SCSI target number (physical unit number)
 * @lun:        SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
                                       uint channel, uint id, uint lun)
{
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup(shost, channel, id, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
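
/*
 * Example: a sketch of a lookup paired with the reference drop it
 * requires (the channel/id/lun triple is illustrative):
 *
 *      struct scsi_device *sdev;
 *
 *      sdev = scsi_device_lookup(shost, 0, 2, 0);
 *      if (sdev) {
 *              ... use sdev ...
 *              scsi_device_put(sdev);
 *      }
 */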

/**
 * scsi_device_cancel - cancel outstanding IO to this device
 * @sdev:       Pointer to struct scsi_device
 * @recovery:   Boolean instructing function to recover device or not.
 *
 **/
int scsi_device_cancel(struct scsi_device *sdev, int recovery)
{
        struct scsi_cmnd *scmd;
        LIST_HEAD(active_list);
        struct list_head *lh, *lh_sf;
        unsigned long flags;

        scsi_device_set_state(sdev, SDEV_CANCEL);

        spin_lock_irqsave(&sdev->list_lock, flags);
        list_for_each_entry(scmd, &sdev->cmd_list, list) {
                if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) {
                        /*
                         * If we are unable to remove the timer, it means
                         * that the command has already timed out or
                         * finished.
                         */
                        if (!scsi_delete_timer(scmd))
                                continue;
                        list_add_tail(&scmd->eh_entry, &active_list);
                }
        }
        spin_unlock_irqrestore(&sdev->list_lock, flags);

        if (!list_empty(&active_list)) {
                list_for_each_safe(lh, lh_sf, &active_list) {
                        scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
                        list_del_init(lh);
                        if (recovery &&
                            !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
                                scmd->result = (DID_ABORT << 16);
                                scsi_finish_command(scmd);
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL(scsi_device_cancel);

#ifdef CONFIG_HOTPLUG_CPU
static int scsi_cpu_notify(struct notifier_block *self,
                           unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;

        switch(action) {
        case CPU_DEAD:
                /* Drain scsi_done_q. */
                local_irq_disable();
                list_splice_init(&per_cpu(scsi_done_q, cpu),
                                 &__get_cpu_var(scsi_done_q));
                raise_softirq_irqoff(SCSI_SOFTIRQ);
                local_irq_enable();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata scsi_cpu_nb = {
        .notifier_call  = scsi_cpu_notify,
};

#define register_scsi_cpu()     register_cpu_notifier(&scsi_cpu_nb)
#define unregister_scsi_cpu()   unregister_cpu_notifier(&scsi_cpu_nb)
#else
#define register_scsi_cpu()
#define unregister_scsi_cpu()
#endif /* CONFIG_HOTPLUG_CPU */

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
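
/*
 * Example: with the parameter writable as above, the logging mask can
 * be changed at runtime through sysfs (path assumes the SCSI core is
 * built as scsi_mod; the mask value is illustrative and must be built
 * from the SCSI_LOG_*_SHIFT/_BITS layout in scsi_logging.h):
 *
 *      # echo 0x200 > /sys/module/scsi_mod/parameters/scsi_logging_level
 */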

static int __init init_scsi(void)
{
        int error, i;

        error = scsi_init_queue();
        if (error)
                return error;
        error = scsi_init_procfs();
        if (error)
                goto cleanup_queue;
        error = scsi_init_devinfo();
        if (error)
                goto cleanup_procfs;
        error = scsi_init_hosts();
        if (error)
                goto cleanup_devlist;
        error = scsi_init_sysctl();
        if (error)
                goto cleanup_hosts;
        error = scsi_sysfs_register();
        if (error)
                goto cleanup_sysctl;

        for (i = 0; i < NR_CPUS; i++)
                INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));

        devfs_mk_dir("scsi");
        open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);
        register_scsi_cpu();
        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;

cleanup_sysctl:
        scsi_exit_sysctl();
cleanup_hosts:
        scsi_exit_hosts();
cleanup_devlist:
        scsi_exit_devinfo();
cleanup_procfs:
        scsi_exit_procfs();
cleanup_queue:
        scsi_exit_queue();
        printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
               -error);
        return error;
}

static void __exit exit_scsi(void)
{
        scsi_sysfs_unregister();
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
        devfs_remove("scsi");
        scsi_exit_procfs();
        scsi_exit_queue();
        unregister_scsi_cpu();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);