/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif
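
/*
 * Note: debug output is compiled out by default. Turning the #undef
 * above into a #define enables the dprintk() calls throughout this
 * file, each tagged with the calling function's name at KERN_ERR level.
 */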

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};
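
/*
 * Lifecycle of a bsg_command: bsg_alloc_command() reserves a queue slot
 * under bd->lock, __bsg_write() maps the header to a request, and
 * bsg_add_command() puts it on busy_list and submits it. The completion
 * callback moves it to done_list, where bsg_read() (or device release)
 * reaps it; bsg_free_command() then releases the slot and wakes any
 * writer sleeping on wq_free.
 */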

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

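/*
 * Hash a minor number into bsg_device_list. The mask below relies on
 * BSG_LIST_ARRAY_SIZE being a power of two.
 */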
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}
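
/*
 * Timeout resolution above, in order of preference: the value userspace
 * passed in hdr->timeout (in milliseconds), then the queue's sg_timeout,
 * then BLK_DEFAULT_SG_TIMEOUT, with BLK_MIN_SG_TIMEOUT enforced as the
 * floor in all cases.
 */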

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}
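
/*
 * For reference, a minimal userspace sg_io_v4 that passes the checks
 * above could be built like this -- an illustrative sketch only, not
 * part of the driver (the CDB contents and error handling are the
 * caller's problem):
 *
 *	struct sg_io_v4 hdr = { 0 };
 *	unsigned char cdb[6] = { 0 };		// e.g. TEST UNIT READY
 *
 *	hdr.guard = 'Q';			// mandatory magic
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (__u64)(unsigned long)cdb;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.timeout = 10000;			// in ms; 0 picks the queue default
 */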

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void __user *dxferp = NULL;
	struct bsg_class_device *bcd = &q->bsg_dev;

	/* if the LLD has been removed then bsg_unregister_queue will
	 * eventually be called and the class_dev freed, so we can no
	 * longer use this request_queue. Return no such address.
	 */
	if (!bcd->class_dev)
		return ERR_PTR(-ENXIO);

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	rq->sense = sense;
	rq->sense_len = 0;

	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}
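
/*
 * Note that commands are queued at the head of the request queue by
 * default; userspace sets BSG_FLAG_Q_AT_TAIL in hdr.flags to queue
 * behind I/O that is already pending.
 */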

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = rq->errors & 0xff;
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
				rq->sense_len);

		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}
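
/*
 * The read()/write() pair above is the asynchronous interface: write()
 * one or more sg_io_v4 headers to submit commands, then read() headers
 * back as they complete. A hedged userspace sketch (illustrative only;
 * fd is assumed to be an open /dev/bsg node, and setup_hdr()/check_hdr()
 * are hypothetical helpers):
 *
 *	struct sg_io_v4 hdr;
 *
 *	setup_hdr(&hdr);
 *	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
 *		return -1;			// submission failed
 *	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
 *		return -1;			// nothing reaped
 *	check_hdr(&hdr);			// inspect status/resid fields
 */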

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * error detection here is best-effort: it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
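
/*
 * Combined with O_NONBLOCK on the fd, the poll support above allows an
 * ordinary event loop in userspace, along these lines (an illustrative
 * sketch only):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLOUT)
 *			;			// room to write another command
 *		if (pfd.revents & POLLIN)
 *			;			// a completed command can be read
 *	}
 */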

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
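
/*
 * SG_IO above is the synchronous path: the caller's sg_io_v4 is mapped,
 * executed and completed within a single ioctl. A hedged userspace
 * sketch (illustrative only; setup_hdr() is a hypothetical helper):
 *
 *	struct sg_io_v4 hdr;
 *
 *	setup_hdr(&hdr);
 *	if (ioctl(fd, SG_IO, &hdr) < 0)
 *		return -1;			// mapping/transport error
 *	if (hdr.info & SG_INFO_CHECK)
 *		return -1;			// device or driver reported failure
 */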

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
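
/*
 * A transport class or LLD typically registers its request queue once
 * the device is fully set up, e.g. (a minimal sketch; whether to pass a
 * name or a release callback depends on the caller):
 *
 *	ret = bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
 *				 NULL, NULL);
 *
 * and undoes the registration with bsg_unregister_queue() on teardown.
 */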

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);