/*
 *  scsi.c  Copyright (C) 1992 Drew Eckhardt
 *          Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *          Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Bug correction thanks go to :
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding request, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall  <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

static void scsi_done(struct scsi_cmnd *cmd);
static int scsi_retry_command(struct scsi_cmnd *cmd);

/*
 * Definitions and constants.
 */

#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)

/*
 * Macro to determine the size of SCSI command. This macro takes vendor
 * unique commands into account. SCSI commands in groups 6 and 7 are
 * vendor unique and we will depend upon the command length being
 * supplied correctly in cmd_len.
 */
#define CDB_SIZE(cmd)	(((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
				COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
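
/*
 * Worked example (informative, not used by the code): READ(10) has opcode
 * 0x28, so its group is (0x28 >> 5) & 7 = 1 and CDB_SIZE evaluates to
 * COMMAND_SIZE(0x28) = 10.  A vendor-unique opcode such as 0xc0 is in
 * group 6, so CDB_SIZE falls back to the driver-supplied cmd_len.
 */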

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"Unknown          ",
	"Unknown          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
};
EXPORT_SYMBOL(scsi_device_types);

/*
 * Function:    scsi_allocate_request
 *
 * Purpose:     Allocate a request descriptor.
 *
 * Arguments:   device		- device for which we want a request
 *		gfp_mask	- allocation flags passed to kmalloc
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 *
 * Returns:     Pointer to request block.
 */
struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
					   int gfp_mask)
{
	const int offset = ALIGN(sizeof(struct scsi_request), 4);
	const int size = offset + sizeof(struct request);
	struct scsi_request *sreq;

	sreq = kmalloc(size, gfp_mask);
	if (likely(sreq != NULL)) {
		memset(sreq, 0, size);
		sreq->sr_request = (struct request *)(((char *)sreq) + offset);
		sreq->sr_device = sdev;
		sreq->sr_host = sdev->host;
		sreq->sr_magic = SCSI_REQ_MAGIC;
		sreq->sr_data_direction = DMA_BIDIRECTIONAL;
	}

	return sreq;
}
EXPORT_SYMBOL(scsi_allocate_request);
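
/*
 * Example (a minimal sketch, not called from this file): a ULD that already
 * holds a reference to sdev would typically pair allocation and release as
 *
 *	struct scsi_request *sreq;
 *
 *	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *	if (!sreq)
 *		return -ENOMEM;
 *	sreq->sr_data_direction = DMA_FROM_DEVICE;
 *	scsi_wait_req(sreq, cmnd, buffer, bufflen, timeout, retries);
 *	result = sreq->sr_result;
 *	scsi_release_request(sreq);
 *
 * where scsi_wait_req() (scsi_lib.c) issues the command synchronously and
 * fills in the remaining sr_* fields.
 */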

void __scsi_release_request(struct scsi_request *sreq)
{
	struct request *req = sreq->sr_request;

	/* unlikely because the tag was usually ended earlier by the
	 * mid-layer. However, for layering reasons ULD's don't end
	 * the tag of commands they generate. */
	if (unlikely(blk_rq_tagged(req))) {
		unsigned long flags;
		struct request_queue *q = req->q;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_queue_end_tag(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	if (likely(sreq->sr_command != NULL)) {
		struct scsi_cmnd *cmd = sreq->sr_command;

		sreq->sr_command = NULL;
		scsi_next_command(cmd);
	}
}

/*
 * Function:    scsi_release_request
 *
 * Purpose:     Release a request descriptor.
 *
 * Arguments:   sreq    - request to release
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 */
void scsi_release_request(struct scsi_request *sreq)
{
	__scsi_release_request(sreq);
	kfree(sreq);
}
EXPORT_SYMBOL(scsi_release_request);

struct scsi_host_cmd_pool {
	kmem_cache_t	*slab;
	unsigned int	users;
	char		*name;
	unsigned int	slab_flags;
	unsigned int	gfp_mask;
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.name		= "scsi_cmd_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.name		= "scsi_cmd_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

static DECLARE_MUTEX(host_cmd_pool_mutex);

static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
					    int gfp_mask)
{
	struct scsi_cmnd *cmd;

	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
			       gfp_mask | shost->cmd_pool->gfp_mask);

	if (unlikely(!cmd)) {
		unsigned long flags;

		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (likely(!list_empty(&shost->free_list))) {
			cmd = list_entry(shost->free_list.next,
					 struct scsi_cmnd, list);
			list_del_init(&cmd->list);
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);
	}

	return cmd;
}

/*
 * Function:	scsi_get_command()
 *
 * Purpose:	Allocate and setup a scsi command block
 *
 * Arguments:	dev	- parent scsi device
 *		gfp_mask- allocator flags
 *
 * Returns:	The allocated scsi command structure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
{
	struct scsi_cmnd *cmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&dev->sdev_gendev))
		return NULL;

	cmd = __scsi_get_command(dev->host, gfp_mask);

	if (likely(cmd != NULL)) {
		unsigned long flags;

		memset(cmd, 0, sizeof(*cmd));
		cmd->device = dev;
		init_timer(&cmd->eh_timeout);
		INIT_LIST_HEAD(&cmd->list);
		spin_lock_irqsave(&dev->list_lock, flags);
		list_add_tail(&cmd->list, &dev->cmd_list);
		spin_unlock_irqrestore(&dev->list_lock, flags);
		/* stamp the allocation time; scsi_softirq uses this
		 * for its overall timeout check.  Only do it on the
		 * success path, cmd is NULL otherwise. */
		cmd->jiffies_at_alloc = jiffies;
	} else
		put_device(&dev->sdev_gendev);

	return cmd;
}
EXPORT_SYMBOL(scsi_get_command);

/*
 * Function:	scsi_put_command()
 *
 * Purpose:	Free a scsi command block
 *
 * Arguments:	cmd	- command block to free
 *
 * Returns:	Nothing.
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	/* serious error if the command hasn't come from a device list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock(&cmd->device->list_lock);
	/* changing locks here, don't need to restore the irq state */
	spin_lock(&shost->free_list_lock);
	if (unlikely(list_empty(&shost->free_list))) {
		list_add(&cmd->list, &shost->free_list);
		cmd = NULL;
	}
	spin_unlock_irqrestore(&shost->free_list_lock, flags);

	if (likely(cmd != NULL))
		kmem_cache_free(shost->cmd_pool->slab, cmd);

	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
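
/*
 * Example (sketch only): a caller that owns a reference to the device can
 * bracket private command use with scsi_get_command()/scsi_put_command():
 *
 *	struct scsi_cmnd *cmd = scsi_get_command(sdev, GFP_KERNEL);
 *
 *	if (!cmd)
 *		return -ENOMEM;
 *	...
 *	scsi_put_command(cmd);
 *
 * scsi_get_command() takes its own reference to sdev and scsi_put_command()
 * drops it, so the pairing keeps the device pinned for the command's
 * lifetime.
 */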

/*
 * Function:	scsi_setup_command_freelist()
 *
 * Purpose:	Setup the command freelist for a scsi host.
 *
 * Arguments:	shost	- host to allocate the freelist for.
 *
 * Returns:	0 on success, -ENOMEM on allocation failure.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
	struct scsi_host_cmd_pool *pool;
	struct scsi_cmnd *cmd;

	spin_lock_init(&shost->free_list_lock);
	INIT_LIST_HEAD(&shost->free_list);

	/*
	 * Select a command slab for this host and create it if not
	 * yet existent.
	 */
	down(&host_cmd_pool_mutex);
	pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
	if (!pool->users) {
		pool->slab = kmem_cache_create(pool->name,
				sizeof(struct scsi_cmnd), 0,
				pool->slab_flags, NULL, NULL);
		if (!pool->slab)
			goto fail;
	}

	pool->users++;
	shost->cmd_pool = pool;
	up(&host_cmd_pool_mutex);

	/*
	 * Get one backup command for this host.
	 */
	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
			GFP_KERNEL | shost->cmd_pool->gfp_mask);
	if (!cmd)
		goto fail2;
	list_add(&cmd->list, &shost->free_list);
	return 0;

 fail2:
	if (!--pool->users)
		kmem_cache_destroy(pool->slab);
	return -ENOMEM;
 fail:
	up(&host_cmd_pool_mutex);
	return -ENOMEM;
}

/*
 * Function:	scsi_destroy_command_freelist()
 *
 * Purpose:	Release the command freelist for a scsi host.
 *
 * Arguments:	shost	- host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
	while (!list_empty(&shost->free_list)) {
		struct scsi_cmnd *cmd;

		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
		list_del_init(&cmd->list);
		kmem_cache_free(shost->cmd_pool->slab, cmd);
	}

	down(&host_cmd_pool_mutex);
	if (!--shost->cmd_pool->users)
		kmem_cache_destroy(shost->cmd_pool->slab);
	up(&host_cmd_pool_mutex);
}

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;
	struct scsi_device *sdev;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			sdev = cmd->device;
			printk(KERN_INFO "scsi <%d:%d:%d:%d> send ",
			       sdev->host->host_no, sdev->channel, sdev->id,
			       sdev->lun);
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * spaces to match disposition and cmd->result
			 * output in scsi_log_completion.
			 */
			printk(" ");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " done = 0x%p, queuecommand 0x%p\n",
				       cmd->buffer, cmd->bufflen,
				       cmd->done,
				       sdev->host->hostt->queuecommand);
			}
		}
	}
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;
	struct scsi_device *sdev;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			sdev = cmd->device;
			printk(KERN_INFO "scsi <%d:%d:%d:%d> done ",
			       sdev->host->host_no, sdev->channel, sdev->id,
			       sdev->lun);
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS");
				break;
			case NEEDS_RETRY:
				printk("RETRY  ");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE");
				break;
			case FAILED:
				printk("FAILED ");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT");
				break;
			default:
				printk("UNKNOWN");
			}
			printk(" %8x ", cmd->result);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION) {
				/*
				 * XXX The scsi_print_sense formatting/prefix
				 * doesn't match this function.
				 */
				scsi_print_sense("", cmd);
			}
			if (level > 3) {
				printk(KERN_INFO "scsi host busy %d failed %d\n",
				       sdev->host->host_busy,
				       sdev->host->host_failed);
			}
		}
	}
}
#endif

/*
 * Assign a serial number and pid to the request for error recovery
 * and debugging purposes.  Protected by the Host_Lock of host.
 */
static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	cmd->serial_number = host->cmd_serial_number++;
	if (cmd->serial_number == 0)
		cmd->serial_number = host->cmd_serial_number++;

	cmd->pid = host->cmd_pid++;
	if (cmd->pid == 0)
		cmd->pid = host->cmd_pid++;
}

/*
 * Function:    scsi_dispatch_cmd
 *
 * Purpose:     Dispatch a command to the low-level driver.
 *
 * Arguments:   cmd - command block we are dispatching.
 *
 * Notes:
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	unsigned long flags = 0;
	unsigned long timeout;
	int rtn = 0;

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		atomic_inc(&cmd->device->iorequest_cnt);
		__scsi_done(cmd);
		/* return 0 (because the command has been processed) */
		goto out;
	}

	/* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
	if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
		/*
		 * in SDEV_BLOCK, the command is just put back on the device
		 * queue.  The suspend state has already blocked the queue so
		 * future requests should not occur until the device
		 * transitions out of the suspend state.
		 */
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));

		/*
		 * NOTE: rtn is still zero here because we don't need the
		 * queue to be plugged on return (it's already stopped)
		 */
		goto out;
	}

	/*
	 * If SCSI-2 or lower, store the LUN value in cmnd.
	 */
	if (cmd->device->scsi_level <= SCSI_2) {
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);
	}

	/*
	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
	 * we can avoid the drive not being ready.
	 */
	timeout = host->last_reset + MIN_RESET_DELAY;

	if (host->resetting && time_before(jiffies, timeout)) {
		int ticks_remaining = timeout - jiffies;
		/*
		 * NOTE: This may be executed from within an interrupt
		 * handler!  This is bad, but for now, it'll do.  The irq
		 * level of the interrupt handler has been masked out by the
		 * platform dependent interrupt handling code already, so the
		 * sti() here will not cause another call to the SCSI host's
		 * interrupt handler (assuming there is one irq-level per
		 * host).
		 */
		while (--ticks_remaining >= 0)
			mdelay(1 + 999 / HZ);
		host->resetting = 0;
	}

	/*
	 * AK: unlikely race here: for some reason the timer could
	 * expire before the serial number is set up below.
	 */
	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

	scsi_log_send(cmd);

	/*
	 * We will use a queued command if possible, otherwise we will
	 * emulate the queuing and calling of completion function ourselves.
	 */
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3,
				printk("queuecommand : command too long.\n"));
		cmd->result = (DID_ABORT << 16);

		scsi_done(cmd);
		goto out;
	}

	spin_lock_irqsave(host->host_lock, flags);
	scsi_cmd_get_serial(host, cmd);

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
	} else {
		rtn = host->hostt->queuecommand(cmd, scsi_done);
	}
	spin_unlock_irqrestore(host->host_lock, flags);
	if (rtn) {
		if (scsi_delete_timer(cmd)) {
			atomic_inc(&cmd->device->iodone_cnt);
			scsi_queue_insert(cmd,
					  (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
					  rtn : SCSI_MLQUEUE_HOST_BUSY);
		}
		SCSI_LOG_MLQUEUE(3,
		    printk("queuecommand : request rejected\n"));
	}

 out:
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
	return rtn;
}

/*
 * Function:    scsi_init_cmd_from_req
 *
 * Purpose:     Initialize a struct scsi_cmnd from a struct scsi_request
 *
 * Arguments:   cmd       - command descriptor.
 *              sreq      - Request from the queue.
 *
 * Lock status: None needed.
 *
 * Returns:     Nothing.
 *
 * Notes:       Mainly transfer data from the request structure to the
 *              command structure.  The request structure is allocated
 *              using the normal memory allocator, and requests can pile
 *              up to more or less any depth.  The command structure represents
 *              a consumable resource, as these are allocated into a pool
 *              when the SCSI subsystem initializes.  The preallocation is
 *              required so that in low-memory situations a disk I/O request
 *              won't cause the memory manager to try and write out a page.
 *              The request structure is generally used by ioctls and character
 *              devices.
 */
void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
{
	sreq->sr_command = cmd;

	cmd->cmd_len = sreq->sr_cmd_len;
	cmd->use_sg = sreq->sr_use_sg;

	cmd->request = sreq->sr_request;
	memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd));
	cmd->serial_number = 0;
	cmd->bufflen = sreq->sr_bufflen;
	cmd->buffer = sreq->sr_buffer;
	cmd->retries = 0;
	cmd->allowed = sreq->sr_allowed;
	cmd->done = sreq->sr_done;
	cmd->timeout_per_command = sreq->sr_timeout_per_command;
	cmd->sc_data_direction = sreq->sr_data_direction;
	cmd->sglist_len = sreq->sr_sglist_len;
	cmd->underflow = sreq->sr_underflow;
	cmd->sc_request = sreq;
	memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd));

	/*
	 * Zero the sense buffer.  Some host adapters automatically request
	 * sense on error.  0 is not a valid sense code.
	 */
	memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
	cmd->request_buffer = sreq->sr_buffer;
	cmd->request_bufflen = sreq->sr_bufflen;
	cmd->old_use_sg = cmd->use_sg;
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;

	cmd->result = 0;

	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
}

/*
 * Per-CPU I/O completion queue.
 */
static DEFINE_PER_CPU(struct list_head, scsi_done_q);

/**
 * scsi_done - Enqueue the finished SCSI command into the done queue.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * This function is the mid-level's (SCSI Core) interrupt routine, which
 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
 * the command to the done queue for further processing.
 *
 * This is the producer of the done queue who enqueues at the tail.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	/*
	 * We don't have to worry about this one timing out any more.
	 * If we are unable to remove the timer, then the command
	 * has already timed out.  In which case, we have no choice but to
	 * let the timeout function run, as we have no idea where in fact
	 * that function could really be.  It might be on another processor,
	 * etc, etc.
	 */
	if (!scsi_delete_timer(cmd))
		return;
	__scsi_done(cmd);
}

/* Private entry to scsi_done() to complete a command when the timer
 * isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
	unsigned long flags;

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	/*
	 * Next, enqueue the command into the done queue.
	 * It is a per-CPU queue, so we just disable local interrupts
	 * and need no spinlock.
	 */
	local_irq_save(flags);
	list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));
	raise_softirq_irqoff(SCSI_SOFTIRQ);
	local_irq_restore(flags);
}

/**
 * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
 *
 * This is the consumer of the done queue.
 *
 * This is called with all interrupts enabled.  This should reduce
 * interrupt latency, stack depth, and reentrancy of the low-level
 * drivers.
 */
static void scsi_softirq(struct softirq_action *h)
{
	int disposition;
	LIST_HEAD(local_q);

	local_irq_disable();
	list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);
	local_irq_enable();

	while (!list_empty(&local_q)) {
		struct scsi_cmnd *cmd = list_entry(local_q.next,
						   struct scsi_cmnd, eh_entry);
		/* The longest time any command should be outstanding is the
		 * per command timeout multiplied by the number of retries.
		 *
		 * For a typical command, this is 2.5 minutes */
		unsigned long wait_for
			= cmd->allowed * cmd->timeout_per_command;
		list_del_init(&cmd->eh_entry);

		disposition = scsi_decide_disposition(cmd);
		if (disposition != SUCCESS &&
		    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
			dev_printk(KERN_ERR, &cmd->device->sdev_gendev,
				   "timing out command, waited %lus\n",
				   wait_for/HZ);
			disposition = SUCCESS;
		}

		scsi_log_completion(cmd, disposition);
		switch (disposition) {
		case SUCCESS:
			scsi_finish_command(cmd);
			break;
		case NEEDS_RETRY:
			scsi_retry_command(cmd);
			break;
		case ADD_TO_MLQUEUE:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
			break;
		default:
			if (!scsi_eh_scmd_add(cmd, 0))
				scsi_finish_command(cmd);
		}
	}
}

/*
 * Function:    scsi_retry_command
 *
 * Purpose:     Send a command back to the low level to be retried.
 *
 * Notes:       This command is always executed in the context of the
 *              bottom half handler, or the error handler thread. Low
 *              level drivers should not become re-entrant as a result of
 *              this.
 */
static int scsi_retry_command(struct scsi_cmnd *cmd)
{
	/*
	 * Restore the SCSI command state.
	 */
	scsi_setup_cmd_retry(cmd);

	/*
	 * Zero the sense information from the last time we tried
	 * this command.
	 */
	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

	return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
}

/*
 * Function:    scsi_finish_command
 *
 * Purpose:     Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_request *sreq;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags which say that the device/host is no longer
	 * capable of accepting new commands.  These are set in scsi_queue.c
	 * for both the queue full condition on a device, and for a
	 * host full condition on the host.
	 *
	 * XXX(hch): What about locking?
	 */
	shost->host_blocked = 0;
	sdev->device_blocked = 0;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, printk("Notifying upper driver of completion "
				"for device %d %x\n", sdev->id, cmd->result));

	/*
	 * We can get here with use_sg=0, causing a panic in the upper level
	 */
	cmd->use_sg = cmd->old_use_sg;

	/*
	 * If there is an associated request structure, copy the data over
	 * before we call the completion function.
	 */
	sreq = cmd->sc_request;
	if (sreq) {
		sreq->sr_result = sreq->sr_command->result;
		if (sreq->sr_result) {
			memcpy(sreq->sr_sense_buffer,
			       sreq->sr_command->sense_buffer,
			       sizeof(sreq->sr_sense_buffer));
		}
	}

	cmd->done(cmd);
}
EXPORT_SYMBOL(scsi_finish_command);

/*
 * Function:	scsi_adjust_queue_depth()
 *
 * Purpose:	Allow low level drivers to tell us to change the queue depth
 *		on a specific SCSI device
 *
 * Arguments:	sdev	- SCSI Device in question
 *		tagged	- Do we use tagged queueing (non-0) or do we treat
 *			  this device as an untagged device (0)
 *		tags	- Number of tags allowed if tagged queueing enabled,
 *			  or number of commands the low level driver can
 *			  queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:	Nothing
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		the right thing depending on whether or not the device is
 *		currently active and whether or not it even has the
 *		command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/* Check to see if the queue is managed by the block layer
	 * if it is, and we fail to adjust the depth, exit */
	if (blk_queue_tagged(sdev->request_queue) &&
	    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
		goto out;

	sdev->queue_depth = tags;
	switch (tagged) {
	case MSG_ORDERED_TAG:
		sdev->ordered_tags = 1;
		sdev->simple_tags = 1;
		break;
	case MSG_SIMPLE_TAG:
		sdev->ordered_tags = 0;
		sdev->simple_tags = 1;
		break;
	default:
		printk(KERN_WARNING "(scsi%d:%d:%d:%d) "
		       "scsi_adjust_queue_depth, bad queue type, "
		       "disabled\n", sdev->host->host_no,
		       sdev->channel, sdev->id, sdev->lun);
		/* fall through to untagged operation */
	case 0:
		sdev->ordered_tags = sdev->simple_tags = 0;
		sdev->queue_depth = tags;
		break;
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
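
/*
 * Example (illustrative sketch of a typical LLDD ->slave_configure() hook;
 * the function name and the depth of 64 are hypothetical):
 *
 *	static int my_slave_configure(struct scsi_device *sdev)
 *	{
 *		if (sdev->tagged_supported)
 *			scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 64);
 *		else
 *			scsi_adjust_queue_depth(sdev, 0,
 *						sdev->host->cmd_per_lun);
 *		return 0;
 *	}
 */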

/*
 * Function:	scsi_track_queue_full()
 *
 * Purpose:	This function will track successive QUEUE_FULL events on a
 *		specific SCSI device to determine if and when there is a
 *		need to adjust the queue depth on the device.
 *
 * Arguments:	sdev	- SCSI Device in question
 *		depth	- Current number of outstanding SCSI commands on
 *			  this device, not counting the one returned as
 *			  QUEUE_FULL.
 *
 * Returns:	0 - No change needed
 *		>0 - Adjust queue depth to this new depth
 *		-1 - Drop back to untagged operation using host->cmd_per_lun
 *			as the untagged command depth
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		"The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
	if ((jiffies >> 4) == sdev->last_queue_full_time)
		return 0;

	sdev->last_queue_full_time = (jiffies >> 4);
	if (sdev->last_queue_full_depth != depth) {
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	if (sdev->last_queue_full_count <= 10)
		return 0;
	if (sdev->last_queue_full_depth < 8) {
		/* Drop back to untagged */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
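
/*
 * Example (sketch of an LLDD completion path; `outstanding' is a
 * hypothetical count of commands the driver currently has active on the
 * device):
 *
 *	if (status_byte(cmd->result) == QUEUE_FULL)
 *		scsi_track_queue_full(cmd->device, outstanding - 1);
 *
 * A zero return means no change was needed, a positive return is the new
 * depth, and -1 means the device was dropped back to untagged operation.
 */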

/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:	device to get a reference to
 *
 * Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 */
int scsi_device_get(struct scsi_device *sdev)
{
	if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
		return -ENXIO;
	if (!get_device(&sdev->sdev_gendev))
		return -ENXIO;
	if (!try_module_get(sdev->host->hostt->module)) {
		put_device(&sdev->sdev_gendev);
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:	device to release a reference on.
 *
 * Releases a reference to the scsi_device and decrements the use count
 * of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
	module_put(sdev->host->hostt->module);
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
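
/*
 * Example (sketch): taking a long-lived reference under the parent host's
 * lock, then dropping it later from non-irq context:
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	if (scsi_device_get(sdev) == 0)
 *		found = sdev;
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *	...
 *	if (found)
 *		scsi_device_put(found);
 */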

/* helper for shost_for_each_device, thus not documented */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
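
/*
 * Example (sketch): drivers normally use this through the
 * shost_for_each_device() macro, which calls __scsi_iterate_devices() to
 * handle the locking and reference counting:
 *
 *	struct scsi_device *sdev;
 *
 *	shost_for_each_device(sdev, shost)
 *		printk(KERN_INFO "scsi %d:%d:%d:%d\n", shost->host_no,
 *		       sdev->channel, sdev->id, sdev->lun);
 */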

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:	target whose devices we want to iterate over.
 * @data:	opaque data to pass to the callback.
 * @fn:		callback function to call for each device.
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void * data,
		     void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(starget_for_each_device);
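
/*
 * Example (sketch, with a hypothetical callback): counting the devices
 * that belong to a target:
 *
 *	static void count_sdev(struct scsi_device *sdev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *	starget_for_each_device(starget, &count, count_sdev);
 */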

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
						   uint lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
		if (sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
						 uint lun)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup_by_target(starget, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any
 * access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
		uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->channel == channel && sdev->id == id &&
		    sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
		uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
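
/*
 * Example (sketch): looking up LUN 0 of target 1 on channel 0 and dropping
 * the reference when done:
 *
 *	struct scsi_device *sdev = scsi_device_lookup(shost, 0, 1, 0);
 *
 *	if (sdev) {
 *		...
 *		scsi_device_put(sdev);
 *	}
 */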

/**
 * scsi_device_cancel - cancel outstanding IO to this device
 * @sdev:	Pointer to struct scsi_device
 * @recovery:	Boolean instructing function to recover device or not.
 *
 **/
int scsi_device_cancel(struct scsi_device *sdev, int recovery)
{
	struct scsi_cmnd *scmd;
	LIST_HEAD(active_list);
	struct list_head *lh, *lh_sf;
	unsigned long flags;

	scsi_device_set_state(sdev, SDEV_CANCEL);

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_for_each_entry(scmd, &sdev->cmd_list, list) {
		if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) {
			/*
			 * If we are unable to remove the timer, it means
			 * that the command has already timed out or
			 * finished.
			 */
			if (!scsi_delete_timer(scmd))
				continue;
			list_add_tail(&scmd->eh_entry, &active_list);
		}
	}
	spin_unlock_irqrestore(&sdev->list_lock, flags);

	if (!list_empty(&active_list)) {
		list_for_each_safe(lh, lh_sf, &active_list) {
			scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
			list_del_init(lh);
			if (recovery &&
			    !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
				scmd->result = (DID_ABORT << 16);
				scsi_finish_command(scmd);
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(scsi_device_cancel);

#ifdef CONFIG_HOTPLUG_CPU
static int scsi_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch(action) {
	case CPU_DEAD:
		/* Drain scsi_done_q. */
		local_irq_disable();
		list_splice_init(&per_cpu(scsi_done_q, cpu),
				 &__get_cpu_var(scsi_done_q));
		raise_softirq_irqoff(SCSI_SOFTIRQ);
		local_irq_enable();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata scsi_cpu_nb = {
	.notifier_call	= scsi_cpu_notify,
};

#define register_scsi_cpu() register_cpu_notifier(&scsi_cpu_nb)
#define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb)
#else
#define register_scsi_cpu()
#define unregister_scsi_cpu()
#endif /* CONFIG_HOTPLUG_CPU */

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
	int error, i;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	for (i = 0; i < NR_CPUS; i++)
		INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));

	devfs_mk_dir("scsi");
	open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);
	register_scsi_cpu();
	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}

static void __exit exit_scsi(void)
{
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	devfs_remove("scsi");
	scsi_exit_procfs();
	scsi_exit_queue();
	unregister_scsi_cpu();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);