/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 */
/*
 * TODO
 *	- Should this get merged, block/scsi_ioctl.c will be migrated into
 *	  this file. To keep maintenance down, it's easier to have them
 *	  separated right now.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
static char bsg_version[] = "block layer sg (bsg) 0.4";
struct bsg_device {
        request_queue_t *queue;
        spinlock_t lock;
        struct list_head busy_list;
        struct list_head done_list;
        struct hlist_node dev_list;
        atomic_t ref_count;
        int minor;
        int queued_cmds;
        int done_cmds;
        wait_queue_head_t wq_done;
        wait_queue_head_t wq_free;
        char name[BUS_ID_SIZE];
        int max_queue;
        unsigned long flags;
};

enum {
        BSG_F_BLOCK             = 1,
        BSG_F_WRITE_PERM        = 2,
};
#define BSG_DEFAULT_CMDS        64
#define BSG_MAX_DEVS            32768
#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif
#define list_entry_bc(entry)    list_entry((entry), struct bsg_command, list)
#define BSG_MAJOR       (240)
static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr, bsg_minor_idx;
#define BSG_LIST_SIZE           (8)
#define bsg_list_idx(minor)     ((minor) & (BSG_LIST_SIZE - 1))
static struct hlist_head bsg_device_list[BSG_LIST_SIZE];
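/*
 * Illustrative note (editor's sketch, not driver code): bsg_list_idx()
 * hashes a minor number into one of the BSG_LIST_SIZE buckets by masking
 * off the low bits; since BSG_LIST_SIZE is a power of two this is the
 * same as minor % BSG_LIST_SIZE:
 *
 *      bsg_list_idx(0)  == 0
 *      bsg_list_idx(9)  == 1
 *      bsg_list_idx(17) == 1   (collides with minor 9)
 *
 * Colliding devices simply share one hlist bucket and are told apart by
 * comparing bd->minor during lookup (see __bsg_get_device()).
 */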
static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);
static struct kmem_cache *bsg_cmd_cachep;
/*
 * our internal command type
 */
struct bsg_command {
        struct bsg_device *bd;
        struct list_head list;
        struct request *rq;
        struct bio *bio;
        struct bio *bidi_bio;
        int err;
        struct sg_io_v4 hdr;
        struct sg_io_v4 __user *uhdr;
        char sense[SCSI_SENSE_BUFFERSIZE];
};
static void bsg_free_command(struct bsg_command *bc)
{
        struct bsg_device *bd = bc->bd;
        unsigned long flags;

        kmem_cache_free(bsg_cmd_cachep, bc);

        spin_lock_irqsave(&bd->lock, flags);
        bd->queued_cmds--;
        spin_unlock_irqrestore(&bd->lock, flags);

        wake_up(&bd->wq_free);
}
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
        struct bsg_command *bc = ERR_PTR(-EINVAL);

        spin_lock_irq(&bd->lock);

        if (bd->queued_cmds >= bd->max_queue)
                goto out;

        bd->queued_cmds++;
        spin_unlock_irq(&bd->lock);

        bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
        if (unlikely(!bc)) {
                spin_lock_irq(&bd->lock);
                bd->queued_cmds--;
                bc = ERR_PTR(-ENOMEM);
                goto out;
        }

        memset(bc, 0, sizeof(*bc));
        bc->bd = bd;
        INIT_LIST_HEAD(&bc->list);
        dprintk("%s: returning free cmd %p\n", bd->name, bc);
        return bc;
out:
        spin_unlock_irq(&bd->lock);
        return bc;
}
static void
bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
        list_del(&bc->list);
        bd->done_cmds--;
}
static void
bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
        bd->done_cmds++;
        list_add_tail(&bc->list, &bd->done_list);
        wake_up(&bd->wq_done);
}
static inline int bsg_io_schedule(struct bsg_device *bd, int state)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        spin_lock_irq(&bd->lock);

        BUG_ON(bd->done_cmds > bd->queued_cmds);

        /*
         * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
         * work to do", even though we return -ENOSPC after this same test
         * during bsg_write() -- there, it means our buffer can't have more
         * bsg_commands added to it, thus has no space left.
         */
        if (bd->done_cmds == bd->queued_cmds) {
                ret = -ENODATA;
                goto unlock;
        }

        if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
                ret = -EAGAIN;
                goto unlock;
        }

        prepare_to_wait(&bd->wq_done, &wait, state);
        spin_unlock_irq(&bd->lock);
        io_schedule();
        finish_wait(&bd->wq_done, &wait);

        if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
                ret = -ERESTARTSYS;

        return ret;
unlock:
        spin_unlock_irq(&bd->lock);
        return ret;
}
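/*
 * Editor's note: the prepare_to_wait()/io_schedule()/finish_wait() calls
 * above are the standard open-coded sleep. A minimal sketch of the idiom,
 * assuming a waitqueue 'wq' and a wake-up condition:
 *
 *      DEFINE_WAIT(wait);
 *
 *      prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *      if (!condition)
 *              io_schedule();
 *      finish_wait(&wq, &wait);
 *
 * prepare_to_wait() queues the task before the final condition check, so
 * a wake-up arriving between the check and the schedule is not lost.
 */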
static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
                                struct sg_io_v4 *hdr, int has_write_perm)
{
        memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */

        if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
                           hdr->request_len))
                return -EFAULT;

        if (blk_verify_command(rq->cmd, has_write_perm))
                return -EPERM;

        /*
         * fill in request structure
         */
        rq->cmd_len = hdr->request_len;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;

        rq->timeout = (hdr->timeout * HZ) / 1000;
        if (!rq->timeout)
                rq->timeout = q->sg_timeout;
        if (!rq->timeout)
                rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

        return 0;
}
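/*
 * Editor's note: hdr->timeout is in milliseconds while rq->timeout is in
 * jiffies, hence the (ms * HZ) / 1000 conversion above. With HZ == 250, a
 * user-supplied timeout of 2000 ms becomes
 *
 *      rq->timeout = (2000 * 250) / 1000 = 500 jiffies
 *
 * i.e. two seconds. A zero result falls back to the queue's sg_timeout,
 * then to BLK_DEFAULT_SG_TIMEOUT.
 */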
/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
{
        if (hdr->guard != 'Q')
                return -EINVAL;
        if (hdr->request_len > BLK_MAX_CDB)
                return -EINVAL;
        if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
            hdr->din_xfer_len > (q->max_sectors << 9))
                return -EIO;

        /* not supported currently */
        if (hdr->protocol || hdr->subprotocol)
                return -EINVAL;

        *rw = hdr->dout_xfer_len ? WRITE : READ;

        return 0;
}
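/*
 * Editor's sketch (hypothetical userspace snippet, not part of the
 * driver) of a TEST UNIT READY header that passes the validation above:
 *
 *      struct sg_io_v4 hdr;
 *      unsigned char cdb[6] = { 0 };
 *
 *      memset(&hdr, 0, sizeof(hdr));
 *      hdr.guard = 'Q';
 *      hdr.request = (__u64)(unsigned long)cdb;
 *      hdr.request_len = sizeof(cdb);
 *
 * The guard byte is the mandatory 'Q' magic, request_len must not exceed
 * BLK_MAX_CDB, protocol and subprotocol stay zero (anything else is
 * rejected here), and with no dout/din transfer length set *rw defaults
 * to READ.
 */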
/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
        request_queue_t *q = bd->queue;
        struct request *rq, *next_rq = NULL;
        int ret, rw = 0; /* shut up gcc */
        unsigned int dxfer_len;
        void *dxferp = NULL;

        dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
                hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
                hdr->din_xfer_len);

        ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
        if (ret)
                return ERR_PTR(ret);

        /*
         * map scatter-gather elements separately and string them to request
         */
        rq = blk_get_request(q, rw, GFP_KERNEL);
        if (!rq)
                return ERR_PTR(-ENOMEM);
        ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
                                                        &bd->flags));
        if (ret)
                goto out;

        if (rw == WRITE && hdr->din_xfer_len) {
                if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
                        ret = -EOPNOTSUPP;
                        goto out;
                }

                next_rq = blk_get_request(q, READ, GFP_KERNEL);
                if (!next_rq) {
                        ret = -ENOMEM;
                        goto out;
                }
                rq->next_rq = next_rq;

                dxferp = (void*)(unsigned long)hdr->din_xferp;
                ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
                if (ret)
                        goto out;
        }

        if (hdr->dout_xfer_len) {
                dxfer_len = hdr->dout_xfer_len;
                dxferp = (void*)(unsigned long)hdr->dout_xferp;
        } else if (hdr->din_xfer_len) {
                dxfer_len = hdr->din_xfer_len;
                dxferp = (void*)(unsigned long)hdr->din_xferp;
        } else
                dxfer_len = 0;

        if (dxfer_len) {
                ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
                if (ret)
                        goto out;
        }
        return rq;
out:
        blk_put_request(rq);
        if (next_rq) {
                blk_rq_unmap_user(next_rq->bio);
                blk_put_request(next_rq);
        }
        return ERR_PTR(ret);
}
/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
        struct bsg_command *bc = rq->end_io_data;
        struct bsg_device *bd = bc->bd;
        unsigned long flags;

        dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
                bd->name, rq, bc, bc->bio, uptodate);

        bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

        spin_lock_irqsave(&bd->lock, flags);
        list_del(&bc->list);
        bsg_add_done_cmd(bd, bc);
        spin_unlock_irqrestore(&bd->lock, flags);
}
/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
                            struct bsg_command *bc, struct request *rq)
{
        rq->sense = bc->sense;
        rq->sense_len = 0;

        /*
         * add bc command to busy queue and submit rq for io
         */
        bc->rq = rq;
        bc->bio = rq->bio;
        if (rq->next_rq)
                bc->bidi_bio = rq->next_rq->bio;
        bc->hdr.duration = jiffies;
        spin_lock_irq(&bd->lock);
        list_add_tail(&bc->list, &bd->busy_list);
        spin_unlock_irq(&bd->lock);

        dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

        rq->end_io_data = bc;
        blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}
static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
        struct bsg_command *bc = NULL;

        spin_lock_irq(&bd->lock);
        if (bd->done_cmds) {
                bc = list_entry_bc(bd->done_list.next);
                bsg_del_done_cmd(bd, bc);
        }
        spin_unlock_irq(&bd->lock);

        return bc;
}
/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
        struct bsg_command *bc;
        int ret;

        do {
                bc = bsg_next_done_cmd(bd);
                if (bc)
                        break;

                if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
                        bc = ERR_PTR(-EAGAIN);
                        break;
                }

                ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
                if (ret) {
                        bc = ERR_PTR(-ERESTARTSYS);
                        break;
                }
        } while (1);

        dprintk("%s: returning done %p\n", bd->name, bc);

        return bc;
}
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
                                    struct bio *bio, struct bio *bidi_bio)
{
        int ret = 0;

        dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
        /*
         * fill in all the output members
         */
        hdr->device_status = status_byte(rq->errors);
        hdr->transport_status = host_byte(rq->errors);
        hdr->driver_status = driver_byte(rq->errors);
        hdr->info = 0;
        if (hdr->device_status || hdr->transport_status || hdr->driver_status)
                hdr->info |= SG_INFO_CHECK;
        hdr->din_resid = rq->data_len;
        hdr->response_len = 0;

        if (rq->sense_len && hdr->response) {
                int len = min((unsigned int) hdr->max_response_len,
                              rq->sense_len);

                ret = copy_to_user((void*)(unsigned long)hdr->response,
                                   rq->sense, len);
                if (!ret)
                        hdr->response_len = len;
                else
                        ret = -EFAULT;
        }

        if (rq->next_rq) {
                blk_rq_unmap_user(bidi_bio);
                blk_put_request(rq->next_rq);
        }

        blk_rq_unmap_user(bio);
        blk_put_request(rq);

        return ret;
}
static int bsg_complete_all_commands(struct bsg_device *bd)
{
        struct bsg_command *bc;
        int ret, tret;

        dprintk("%s: entered\n", bd->name);

        set_bit(BSG_F_BLOCK, &bd->flags);

        /*
         * wait for all commands to complete
         */
        ret = 0;
        do {
                ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
                /*
                 * look for -ENODATA specifically -- we'll sometimes get
                 * -ERESTARTSYS when we've taken a signal, but we can't
                 * return until we're done freeing the queue, so ignore
                 * it. The signal will get handled when we're done freeing
                 * the device.
                 */
        } while (ret != -ENODATA);

        /*
         * discard done commands
         */
        ret = 0;
        do {
                spin_lock_irq(&bd->lock);
                if (!bd->queued_cmds) {
                        spin_unlock_irq(&bd->lock);
                        break;
                }
                spin_unlock_irq(&bd->lock);

                bc = bsg_get_done_cmd(bd);
                if (IS_ERR(bc))
                        break;

                tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
                                                bc->bidi_bio);
                if (!ret)
                        ret = tret;

                bsg_free_command(bc);
        } while (1);

        return ret;
}
static ssize_t
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
           const struct iovec *iov, ssize_t *bytes_read)
{
        struct bsg_command *bc;
        int nr_commands, ret;

        if (count % sizeof(struct sg_io_v4))
                return -EINVAL;

        ret = 0;
        nr_commands = count / sizeof(struct sg_io_v4);
        while (nr_commands) {
                bc = bsg_get_done_cmd(bd);
                if (IS_ERR(bc)) {
                        ret = PTR_ERR(bc);
                        break;
                }

                /*
                 * this is the only case where we need to copy data back
                 * after completing the request. so do that here,
                 * bsg_complete_work() cannot do that for us
                 */
                ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
                                               bc->bidi_bio);

                if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
                        ret = -EFAULT;

                bsg_free_command(bc);

                if (ret)
                        break;

                buf += sizeof(struct sg_io_v4);
                *bytes_read += sizeof(struct sg_io_v4);
                nr_commands--;
        }

        return ret;
}
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
        if (file->f_flags & O_NONBLOCK)
                clear_bit(BSG_F_BLOCK, &bd->flags);
        else
                set_bit(BSG_F_BLOCK, &bd->flags);
}
static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
        if (file->f_mode & FMODE_WRITE)
                set_bit(BSG_F_WRITE_PERM, &bd->flags);
        else
                clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}
/*
 * decide whether an error is fatal: -ENOSPC, -ENODATA and -EAGAIN are
 * expected flow-control results, anything else is a real error
 */
static inline int err_block_err(int ret)
{
        if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
                return 1;

        return 0;
}
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        struct bsg_device *bd = file->private_data;
        int ret;
        ssize_t bytes_read;

        dprintk("%s: read %Zd bytes\n", bd->name, count);

        bsg_set_block(bd, file);
        bytes_read = 0;
        ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
        *ppos = bytes_read;

        if (!bytes_read || (bytes_read && err_block_err(ret)))
                bytes_read = ret;

        return bytes_read;
}
static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
                           size_t count, ssize_t *bytes_read)
{
        struct bsg_command *bc;
        struct request *rq;
        int ret, nr_commands;

        if (count % sizeof(struct sg_io_v4))
                return -EINVAL;

        nr_commands = count / sizeof(struct sg_io_v4);
        rq = NULL;
        bc = NULL;
        ret = 0;
        while (nr_commands) {
                request_queue_t *q = bd->queue;

                bc = bsg_alloc_command(bd);
                if (IS_ERR(bc)) {
                        ret = PTR_ERR(bc);
                        bc = NULL;
                        break;
                }

                bc->uhdr = (struct sg_io_v4 __user *) buf;
                if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
                        ret = -EFAULT;
                        break;
                }

                /*
                 * get a request, fill in the blanks, and add to request queue
                 */
                rq = bsg_map_hdr(bd, &bc->hdr);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        rq = NULL;
                        break;
                }

                bsg_add_command(bd, q, bc, rq);
                bc = NULL;
                rq = NULL;
                nr_commands--;
                buf += sizeof(struct sg_io_v4);
                *bytes_read += sizeof(struct sg_io_v4);
        }

        if (bc)
                bsg_free_command(bc);

        return ret;
}
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
        struct bsg_device *bd = file->private_data;
        ssize_t bytes_read;
        int ret;

        dprintk("%s: write %Zd bytes\n", bd->name, count);

        bsg_set_block(bd, file);
        bsg_set_write_perm(bd, file);

        bytes_read = 0;
        ret = __bsg_write(bd, buf, count, &bytes_read);
        *ppos = bytes_read;

        /*
         * return bytes written on non-fatal errors
         */
        if (!bytes_read || (bytes_read && err_block_err(ret)))
                bytes_read = ret;

        dprintk("%s: returning %Zd\n", bd->name, bytes_read);
        return bytes_read;
}
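/*
 * Editor's sketch of the intended userspace usage of the write/read pair
 * (hypothetical snippet; the device path is illustrative): commands are
 * queued by writing sg_io_v4 headers and reaped by reading completed
 * headers back, one sizeof(struct sg_io_v4) chunk per command.
 *
 *      struct sg_io_v4 hdr[2];
 *      int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *
 *      (fill both headers as in the sketch after bsg_validate_sgv4_hdr())
 *
 *      write(fd, hdr, sizeof(hdr));    queues two commands
 *      read(fd, hdr, sizeof(hdr));     blocks until both have completed
 *
 * Completions come back in completion order, not submission order, and a
 * count that is not a multiple of sizeof(struct sg_io_v4) gets -EINVAL
 * from __bsg_read()/__bsg_write().
 */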
static struct bsg_device *bsg_alloc_device(void)
{
        struct bsg_device *bd;

        bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
        if (unlikely(!bd))
                return NULL;

        spin_lock_init(&bd->lock);

        bd->max_queue = BSG_DEFAULT_CMDS;

        INIT_LIST_HEAD(&bd->busy_list);
        INIT_LIST_HEAD(&bd->done_list);
        INIT_HLIST_NODE(&bd->dev_list);

        init_waitqueue_head(&bd->wq_free);
        init_waitqueue_head(&bd->wq_done);
        return bd;
}
static int bsg_put_device(struct bsg_device *bd)
{
        int ret = 0;

        mutex_lock(&bsg_mutex);

        if (!atomic_dec_and_test(&bd->ref_count))
                goto out;

        dprintk("%s: tearing down\n", bd->name);

        /*
         * close can always block
         */
        set_bit(BSG_F_BLOCK, &bd->flags);

        /*
         * correct error detection baddies here again. it's the responsibility
         * of the app to properly reap commands before close() if it wants
         * fool-proof error detection
         */
        ret = bsg_complete_all_commands(bd);

        blk_put_queue(bd->queue);
        hlist_del(&bd->dev_list);
        kfree(bd);
out:
        mutex_unlock(&bsg_mutex);
        return ret;
}
static struct bsg_device *bsg_add_device(struct inode *inode,
                                         struct request_queue *rq,
                                         struct file *file)
{
        struct bsg_device *bd = NULL;
#ifdef BSG_DEBUG
        unsigned char buf[32];
#endif

        bd = bsg_alloc_device();
        if (!bd)
                return ERR_PTR(-ENOMEM);

        bd->queue = rq;
        kobject_get(&rq->kobj);
        bsg_set_block(bd, file);

        atomic_set(&bd->ref_count, 1);
        bd->minor = iminor(inode);
        mutex_lock(&bsg_mutex);
        hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);

        strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
        dprintk("bound to <%s>, max queue %d\n",
                format_dev_t(buf, inode->i_rdev), bd->max_queue);

        mutex_unlock(&bsg_mutex);
        return bd;
}
static struct bsg_device *__bsg_get_device(int minor)
{
        struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
        struct bsg_device *bd = NULL;
        struct hlist_node *entry;

        mutex_lock(&bsg_mutex);

        hlist_for_each(entry, list) {
                bd = hlist_entry(entry, struct bsg_device, dev_list);
                if (bd->minor == minor) {
                        atomic_inc(&bd->ref_count);
                        break;
                }

                bd = NULL;
        }

        mutex_unlock(&bsg_mutex);
        return bd;
}
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
        struct bsg_device *bd = __bsg_get_device(iminor(inode));
        struct bsg_class_device *bcd, *__bcd;

        if (bd)
                return bd;

        /*
         * find the class device
         */
        bcd = NULL;
        mutex_lock(&bsg_mutex);
        list_for_each_entry(__bcd, &bsg_class_list, list) {
                if (__bcd->minor == iminor(inode)) {
                        bcd = __bcd;
                        break;
                }
        }
        mutex_unlock(&bsg_mutex);

        if (!bcd)
                return ERR_PTR(-ENODEV);

        return bsg_add_device(inode, bcd->queue, file);
}
static int bsg_open(struct inode *inode, struct file *file)
{
        struct bsg_device *bd = bsg_get_device(inode, file);

        if (IS_ERR(bd))
                return PTR_ERR(bd);

        file->private_data = bd;
        return 0;
}
static int bsg_release(struct inode *inode, struct file *file)
{
        struct bsg_device *bd = file->private_data;

        file->private_data = NULL;
        return bsg_put_device(bd);
}
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
        struct bsg_device *bd = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &bd->wq_done, wait);
        poll_wait(file, &bd->wq_free, wait);

        spin_lock_irq(&bd->lock);
        if (!list_empty(&bd->done_list))
                mask |= POLLIN | POLLRDNORM;
        if (bd->queued_cmds >= bd->max_queue)
                mask |= POLLOUT;
        spin_unlock_irq(&bd->lock);

        return mask;
}
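/*
 * Editor's sketch (hypothetical userspace snippet): the mask above maps
 * onto a normal poll(2) loop, POLLIN meaning "a completed command can be
 * read back":
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *      if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *              read(fd, &hdr, sizeof(hdr));
 */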
static int
bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
          unsigned long arg)
{
        struct bsg_device *bd = file->private_data;
        int __user *uarg = (int __user *) arg;

        if (!bd)
                return -ENXIO;

        switch (cmd) {
                /*
                 * our own ioctls
                 */
        case SG_GET_COMMAND_Q:
                return put_user(bd->max_queue, uarg);
        case SG_SET_COMMAND_Q: {
                int queue;

                if (get_user(queue, uarg))
                        return -EFAULT;
                if (queue < 1)
                        return -EINVAL;

                spin_lock_irq(&bd->lock);
                bd->max_queue = queue;
                spin_unlock_irq(&bd->lock);
                return 0;
        }

        /*
         * SCSI/sg ioctls
         */
        case SG_GET_VERSION_NUM:
        case SCSI_IOCTL_GET_IDLUN:
        case SCSI_IOCTL_GET_BUS_NUMBER:
        case SG_SET_TIMEOUT:
        case SG_GET_TIMEOUT:
        case SG_GET_RESERVED_SIZE:
        case SG_SET_RESERVED_SIZE:
        case SG_EMULATED_HOST:
        case SCSI_IOCTL_SEND_COMMAND: {
                void __user *uarg = (void __user *) arg;
                return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
        }
        case SG_IO: {
                struct request *rq;
                struct bio *bio, *bidi_bio = NULL;
                struct sg_io_v4 hdr;

                if (copy_from_user(&hdr, uarg, sizeof(hdr)))
                        return -EFAULT;

                rq = bsg_map_hdr(bd, &hdr);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                bio = rq->bio;
                if (rq->next_rq)
                        bidi_bio = rq->next_rq->bio;
                blk_execute_rq(bd->queue, NULL, rq, 0);
                blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

                if (copy_to_user(uarg, &hdr, sizeof(hdr)))
                        return -EFAULT;

                return 0;
        }
        /*
         * block device ioctls
         */
        default:
#if 0
                return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
                return -ENOTTY;
#endif
        }
}
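/*
 * Editor's sketch (hypothetical userspace snippet) of the synchronous
 * SG_IO path above, which maps, executes and completes one sg_io_v4
 * request in a single ioctl() without the write()/read() queueing:
 *
 *      struct sg_io_v4 hdr;
 *      unsigned char cdb[6] = { 0 };
 *      unsigned char sense[32];
 *
 *      memset(&hdr, 0, sizeof(hdr));
 *      hdr.guard = 'Q';
 *      hdr.request = (__u64)(unsigned long)cdb;
 *      hdr.request_len = sizeof(cdb);
 *      hdr.response = (__u64)(unsigned long)sense;
 *      hdr.max_response_len = sizeof(sense);
 *
 *      if (ioctl(fd, SG_IO, &hdr) == 0 && !(hdr.info & SG_INFO_CHECK))
 *              the command succeeded with no sense data
 */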
static struct file_operations bsg_fops = {
        .read           =       bsg_read,
        .write          =       bsg_write,
        .poll           =       bsg_poll,
        .open           =       bsg_open,
        .release        =       bsg_release,
        .ioctl          =       bsg_ioctl,
        .owner          =       THIS_MODULE,
};
void bsg_unregister_queue(struct request_queue *q)
{
        struct bsg_class_device *bcd = &q->bsg_dev;

        if (!bcd->class_dev)
                return;

        mutex_lock(&bsg_mutex);
        sysfs_remove_link(&q->kobj, "bsg");
        class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
        bcd->class_dev = NULL;
        list_del_init(&bcd->list);
        bsg_device_nr--;
        mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
int bsg_register_queue(struct request_queue *q, const char *name)
{
        struct bsg_class_device *bcd, *__bcd;
        dev_t dev;
        int ret = -EXFULL;
        struct class_device *class_dev = NULL;

        /*
         * we need a proper transport to send commands, not a stacked device
         */
        if (!q->request_fn)
                return 0;

        bcd = &q->bsg_dev;
        memset(bcd, 0, sizeof(*bcd));
        INIT_LIST_HEAD(&bcd->list);

        mutex_lock(&bsg_mutex);
        if (bsg_device_nr == BSG_MAX_DEVS) {
                printk(KERN_ERR "bsg: too many bsg devices\n");
                goto err;
        }

retry:
        list_for_each_entry(__bcd, &bsg_class_list, list) {
                if (__bcd->minor == bsg_minor_idx) {
                        bsg_minor_idx++;
                        if (bsg_minor_idx == BSG_MAX_DEVS)
                                bsg_minor_idx = 0;
                        goto retry;
                }
        }

        bcd->minor = bsg_minor_idx++;
        if (bsg_minor_idx == BSG_MAX_DEVS)
                bsg_minor_idx = 0;

        bcd->queue = q;
        dev = MKDEV(BSG_MAJOR, bcd->minor);
        class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
        if (IS_ERR(class_dev)) {
                ret = PTR_ERR(class_dev);
                goto err;
        }
        bcd->class_dev = class_dev;

        if (q->kobj.dentry) {
                ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
                if (ret)
                        goto err;
        }

        list_add_tail(&bcd->list, &bsg_class_list);
        bsg_device_nr++;

        mutex_unlock(&bsg_mutex);
        return 0;
err:
        if (class_dev)
                class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
        mutex_unlock(&bsg_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
static int bsg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
{
        int ret;
        struct scsi_device *sdp = to_scsi_device(cl_dev->dev);
        struct request_queue *rq = sdp->request_queue;

        if (rq->kobj.parent)
                ret = bsg_register_queue(rq, kobject_name(rq->kobj.parent));
        else
                ret = bsg_register_queue(rq, kobject_name(&sdp->sdev_gendev.kobj));
        return ret;
}
static void bsg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
{
        bsg_unregister_queue(to_scsi_device(cl_dev->dev)->request_queue);
}
static struct class_interface bsg_intf = {
        .add            =       bsg_add,
        .remove         =       bsg_remove,
};
static struct cdev bsg_cdev = {
        .kobj   =       {.name = "bsg", },
        .owner  =       THIS_MODULE,
};
static int __init bsg_init(void)
{
        int ret, i;

        bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
                                sizeof(struct bsg_command), 0, 0, NULL, NULL);
        if (!bsg_cmd_cachep) {
                printk(KERN_ERR "bsg: failed creating slab cache\n");
                return -ENOMEM;
        }

        for (i = 0; i < BSG_LIST_SIZE; i++)
                INIT_HLIST_HEAD(&bsg_device_list[i]);

        bsg_class = class_create(THIS_MODULE, "bsg");
        if (IS_ERR(bsg_class)) {
                kmem_cache_destroy(bsg_cmd_cachep);
                return PTR_ERR(bsg_class);
        }

        ret = register_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS, "bsg");
        if (ret) {
                kmem_cache_destroy(bsg_cmd_cachep);
                class_destroy(bsg_class);
                return ret;
        }

        cdev_init(&bsg_cdev, &bsg_fops);
        ret = cdev_add(&bsg_cdev, MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
        if (ret) {
                kmem_cache_destroy(bsg_cmd_cachep);
                class_destroy(bsg_class);
                unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
                return ret;
        }

        ret = scsi_register_interface(&bsg_intf);
        if (ret) {
                printk(KERN_ERR "bsg: failed to register SCSI interface %d\n", ret);
                kmem_cache_destroy(bsg_cmd_cachep);
                class_destroy(bsg_class);
                cdev_del(&bsg_cdev);
                unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
                return ret;
        }

        printk(KERN_INFO "%s loaded\n", bsg_version);
        return 0;
}
MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION("Block layer SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
device_initcall(bsg_init);