1 | /* | |
2 | * bsg.c - block layer implementation of the sg v4 interface | |
3 | * | |
4 | * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs | |
5 | * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com> | |
6 | * | |
7 | * This file is subject to the terms and conditions of the GNU General Public | |
8 | * License version 2. See the file "COPYING" in the main directory of this | |
9 | * archive for more details. | |
10 | * | |
11 | */ | |
12 | #include <linux/module.h> | |
13 | #include <linux/init.h> | |
14 | #include <linux/file.h> | |
15 | #include <linux/blkdev.h> | |
16 | #include <linux/cdev.h> | |
17 | #include <linux/jiffies.h> | |
18 | #include <linux/percpu.h> | |
19 | #include <linux/idr.h> | |
20 | #include <linux/bsg.h> | |
21 | #include <linux/slab.h> | |
22 | ||
23 | #include <scsi/scsi.h> | |
24 | #include <scsi/scsi_ioctl.h> | |
25 | #include <scsi/scsi_cmnd.h> | |
26 | #include <scsi/scsi_device.h> | |
27 | #include <scsi/scsi_driver.h> | |
28 | #include <scsi/sg.h> | |
29 | ||
30 | #define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver" | |
31 | #define BSG_VERSION "0.4" | |
32 | ||
33 | #define bsg_dbg(bd, fmt, ...) \ | |
34 | pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__) | |
35 | ||
/*
 * Per-open-file state for a bsg character device node.
 */
struct bsg_device {
	struct request_queue *queue;	/* backing request queue (referenced) */
	spinlock_t lock;		/* protects max_queue updates */
	struct hlist_node dev_list;	/* link in the bsg_device_list hash */
	refcount_t ref_count;		/* open references; freed on last put */
	char name[20];			/* class-device name, for debug output */
	int max_queue;			/* SG_{GET,SET}_COMMAND_Q value */
};
44 | ||
45 | #define BSG_DEFAULT_CMDS 64 | |
46 | #define BSG_MAX_DEVS 32768 | |
47 | ||
48 | static DEFINE_MUTEX(bsg_mutex); | |
49 | static DEFINE_IDR(bsg_minor_idr); | |
50 | ||
51 | #define BSG_LIST_ARRAY_SIZE 8 | |
52 | static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE]; | |
53 | ||
54 | static struct class *bsg_class; | |
55 | static int bsg_major; | |
56 | ||
57 | static inline struct hlist_head *bsg_dev_idx_hash(int index) | |
58 | { | |
59 | return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; | |
60 | } | |
61 | ||
62 | #define uptr64(val) ((void __user *)(uintptr_t)(val)) | |
63 | ||
64 | static int bsg_scsi_check_proto(struct sg_io_v4 *hdr) | |
65 | { | |
66 | if (hdr->protocol != BSG_PROTOCOL_SCSI || | |
67 | hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD) | |
68 | return -EINVAL; | |
69 | return 0; | |
70 | } | |
71 | ||
/*
 * Copy the CDB described by an sg_io_v4 header from user space into the
 * request's scsi_request and verify the caller may issue it.
 *
 * Returns 0 on success, -ENOMEM if an oversized CDB buffer cannot be
 * allocated, -EFAULT on a failed user copy, or -EPERM if the command is
 * not permitted for @mode.  A heap-allocated sreq->cmd is released by
 * bsg_scsi_free_rq() on the request teardown path, including on error.
 */
static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct scsi_request *sreq = scsi_req(rq);

	/*
	 * NOTE(review): hdr->request_len is a __u32; confirm that
	 * scsi_request::cmd_len is wide enough that this assignment
	 * cannot truncate before the BLK_MAX_CDB check below.
	 */
	sreq->cmd_len = hdr->request_len;
	if (sreq->cmd_len > BLK_MAX_CDB) {
		/* CDB longer than the inline buffer: allocate a heap copy */
		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
		if (!sreq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
		return -EFAULT;
	if (blk_verify_command(sreq->cmd, mode))
		return -EPERM;
	return 0;
}
90 | ||
/*
 * Translate the completed request's result back into the user-visible
 * sg_io_v4 header: status bytes, sense data, and residual counts.
 *
 * Returns 0, or -EFAULT if the sense data could not be copied out (the
 * status fields in @hdr are still filled in that case).
 */
static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct scsi_request *sreq = scsi_req(rq);
	int ret = 0;

	/*
	 * fill in all the output members
	 */
	hdr->device_status = sreq->result & 0xff;
	hdr->transport_status = host_byte(sreq->result);
	hdr->driver_status = driver_byte(sreq->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (sreq->sense_len && hdr->response) {
		/* copy at most what the caller's response buffer can hold */
		int len = min_t(unsigned int, hdr->max_response_len,
				sreq->sense_len);

		if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	if (rq->next_rq) {
		/* bidirectional: rq carries data-out, next_rq carries data-in */
		hdr->dout_resid = sreq->resid_len;
		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
	} else if (rq_data_dir(rq) == READ) {
		hdr->din_resid = sreq->resid_len;
	} else {
		hdr->dout_resid = sreq->resid_len;
	}

	return ret;
}
128 | ||
/* Free a heap-allocated CDB attached by bsg_scsi_fill_hdr(), if any. */
static void bsg_scsi_free_rq(struct request *rq)
{
	scsi_req_free_cmd(scsi_req(rq));
}
133 | ||
/* bsg_ops implementation for SCSI passthrough queues. */
static const struct bsg_ops bsg_scsi_ops = {
	.check_proto		= bsg_scsi_check_proto,
	.fill_hdr		= bsg_scsi_fill_hdr,
	.complete_rq		= bsg_scsi_complete_rq,
	.free_rq		= bsg_scsi_free_rq,
};
140 | ||
/*
 * Handle the SG_IO ioctl: build a (possibly bidirectional) passthrough
 * request from a user-supplied sg_io_v4 header, execute it synchronously,
 * and copy the updated header back to user space.
 *
 * Returns 0 on success or a negative errno.
 */
static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
{
	struct request *rq, *next_rq = NULL;
	struct bio *bio, *bidi_bio = NULL;
	struct sg_io_v4 hdr;
	int ret;

	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
		return -EFAULT;

	/* the queue must still be registered with bsg */
	if (!q->bsg_dev.class_dev)
		return -ENXIO;

	/* sg v4 headers carry the guard byte 'Q' */
	if (hdr.guard != 'Q')
		return -EINVAL;
	ret = q->bsg_dev.ops->check_proto(&hdr);
	if (ret)
		return ret;

	/* a data-out transfer selects SCSI_OUT; otherwise SCSI_IN */
	rq = blk_get_request(q, hdr.dout_xfer_len ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
	if (ret)
		goto out;

	/* resolve the timeout: user value, queue default, then global
	 * default, finally clamped to the minimum */
	rq->timeout = msecs_to_jiffies(hdr.timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	if (hdr.dout_xfer_len && hdr.din_xfer_len) {
		/* bidirectional: a second request carries the data-in side */
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		pr_warn_once(
			"BIDI support in bsg has been deprecated and might be removed. "
			"Please report your use case to linux-scsi@vger.kernel.org\n");

		next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			goto out;
		}

		rq->next_rq = next_rq;
		ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr.din_xferp),
				hdr.din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out_free_nextrq;
	}

	/* map the primary transfer buffer, if the caller supplied one */
	if (hdr.dout_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
				hdr.dout_xfer_len, GFP_KERNEL);
	} else if (hdr.din_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
				hdr.din_xfer_len, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_nextrq;

	/* stash the bios before execution so they can be unmapped after */
	bio = rq->bio;
	if (rq->next_rq)
		bidi_bio = rq->next_rq->bio;

	blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
	ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);

	if (rq->next_rq) {
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	}

	blk_rq_unmap_user(bio);
	rq->q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);

	/* hand the updated header (status, residuals, sense) back */
	if (copy_to_user(uarg, &hdr, sizeof(hdr)))
		return -EFAULT;
	return ret;

out_unmap_nextrq:
	if (rq->next_rq)
		blk_rq_unmap_user(rq->next_rq->bio);
out_free_nextrq:
	if (rq->next_rq)
		blk_put_request(rq->next_rq);
out:
	q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ret;
}
242 | ||
243 | static struct bsg_device *bsg_alloc_device(void) | |
244 | { | |
245 | struct bsg_device *bd; | |
246 | ||
247 | bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); | |
248 | if (unlikely(!bd)) | |
249 | return NULL; | |
250 | ||
251 | spin_lock_init(&bd->lock); | |
252 | bd->max_queue = BSG_DEFAULT_CMDS; | |
253 | INIT_HLIST_NODE(&bd->dev_list); | |
254 | return bd; | |
255 | } | |
256 | ||
/*
 * Drop a reference on @bd; on the final put, unhash and free it and
 * drop the queue reference taken in bsg_add_device().  Always returns 0.
 */
static int bsg_put_device(struct bsg_device *bd)
{
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	if (!refcount_dec_and_test(&bd->ref_count)) {
		mutex_unlock(&bsg_mutex);
		return 0;
	}

	/* last reference: remove from the hash before dropping the mutex */
	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	bsg_dbg(bd, "tearing down\n");

	/*
	 * close can always block
	 */
	kfree(bd);
	blk_put_queue(q);
	return 0;
}
280 | ||
/*
 * Create a new bsg_device bound to @rq and hash it by the inode's minor
 * number.  Takes a reference on the queue that is dropped again in
 * bsg_put_device().  Returns the new device or an ERR_PTR on failure.
 * Caller must hold bsg_mutex.
 */
static struct bsg_device *bsg_add_device(struct inode *inode,
					struct request_queue *rq,
					struct file *file)
{
	struct bsg_device *bd;
	unsigned char buf[32];

	lockdep_assert_held(&bsg_mutex);

	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	refcount_set(&bd->ref_count, 1);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	/* bd was zero-allocated, so the sizeof-1 copy stays NUL-terminated */
	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	return bd;
}
310 | ||
311 | static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) | |
312 | { | |
313 | struct bsg_device *bd; | |
314 | ||
315 | lockdep_assert_held(&bsg_mutex); | |
316 | ||
317 | hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { | |
318 | if (bd->queue == q) { | |
319 | refcount_inc(&bd->ref_count); | |
320 | goto found; | |
321 | } | |
322 | } | |
323 | bd = NULL; | |
324 | found: | |
325 | return bd; | |
326 | } | |
327 | ||
328 | static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file) | |
329 | { | |
330 | struct bsg_device *bd; | |
331 | struct bsg_class_device *bcd; | |
332 | ||
333 | /* | |
334 | * find the class device | |
335 | */ | |
336 | mutex_lock(&bsg_mutex); | |
337 | bcd = idr_find(&bsg_minor_idr, iminor(inode)); | |
338 | ||
339 | if (!bcd) { | |
340 | bd = ERR_PTR(-ENODEV); | |
341 | goto out_unlock; | |
342 | } | |
343 | ||
344 | bd = __bsg_get_device(iminor(inode), bcd->queue); | |
345 | if (!bd) | |
346 | bd = bsg_add_device(inode, bcd->queue, file); | |
347 | ||
348 | out_unlock: | |
349 | mutex_unlock(&bsg_mutex); | |
350 | return bd; | |
351 | } | |
352 | ||
353 | static int bsg_open(struct inode *inode, struct file *file) | |
354 | { | |
355 | struct bsg_device *bd; | |
356 | ||
357 | bd = bsg_get_device(inode, file); | |
358 | ||
359 | if (IS_ERR(bd)) | |
360 | return PTR_ERR(bd); | |
361 | ||
362 | file->private_data = bd; | |
363 | return 0; | |
364 | } | |
365 | ||
/* Release handler: drop the reference taken at open time. */
static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}
373 | ||
/* SG_GET_COMMAND_Q: report the per-device queued-command limit. */
static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
{
	return put_user(bd->max_queue, uarg);
}
378 | ||
/*
 * SG_SET_COMMAND_Q: set the per-device queued-command limit.
 *
 * NOTE(review): only a lower bound (>= 1) is enforced on the
 * user-supplied value; confirm the absence of an upper bound is intended.
 */
static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
{
	int queue;

	if (get_user(queue, uarg))
		return -EFAULT;
	if (queue < 1)
		return -EINVAL;

	spin_lock_irq(&bd->lock);
	bd->max_queue = queue;
	spin_unlock_irq(&bd->lock);
	return 0;
}
393 | ||
/*
 * ioctl dispatcher for bsg file descriptors: the two bsg-private
 * queue-depth ioctls are handled here, generic SCSI/sg ioctls are
 * forwarded to scsi_cmd_ioctl(), and SG_IO runs a passthrough command.
 * Unknown commands return -ENOTTY.
 */
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	void __user *uarg = (void __user *) arg;

	switch (cmd) {
	/*
	 * Our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return bsg_get_command_q(bd, uarg);
	case SG_SET_COMMAND_Q:
		return bsg_set_command_q(bd, uarg);

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND:
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	case SG_IO:
		return bsg_sg_io(bd->queue, file->f_mode, uarg);
	default:
		return -ENOTTY;
	}
}
427 | ||
/* File operations for /dev/bsg/* character device nodes. */
static const struct file_operations bsg_fops = {
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};
435 | ||
/*
 * Tear down the bsg registration for @q: release the minor, remove the
 * "bsg" sysfs link and unregister the class device.  Safe to call on a
 * queue that was never registered (class_dev is NULL in that case).
 */
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
452 | ||
/*
 * Register @q with bsg: allocate a minor number, create the class
 * device and link it from the queue's sysfs directory.
 *
 * Returns 0 on success (including the silent no-op for non-mq queues)
 * or a negative errno, unwinding any partial registration.
 */
int bsg_register_queue(struct request_queue *q, struct device *parent,
		const char *name, const struct bsg_ops *ops)
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!queue_is_mq(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		/* remap minor exhaustion to the historical -EINVAL */
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->ops = ops;
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto idr_remove;
	}
	bcd->class_dev = class_dev;

	/* expose queue/bsg -> class device when the queue is in sysfs */
	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
idr_remove:
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
509 | ||
510 | int bsg_scsi_register_queue(struct request_queue *q, struct device *parent) | |
511 | { | |
512 | if (!blk_queue_scsi_passthrough(q)) { | |
513 | WARN_ONCE(true, "Attempt to register a non-SCSI queue\n"); | |
514 | return -EINVAL; | |
515 | } | |
516 | ||
517 | return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops); | |
518 | } | |
519 | EXPORT_SYMBOL_GPL(bsg_scsi_register_queue); | |
520 | ||
521 | static struct cdev bsg_cdev; | |
522 | ||
/* devnode callback: place bsg nodes under /dev/bsg/<name>. */
static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}
527 | ||
/*
 * Module init: set up the device-list hash, the "bsg" device class, a
 * char-dev region of BSG_MAX_DEVS minors, and the shared cdev.
 * Unwinds in reverse order on failure.
 */
static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class))
		return PTR_ERR(bsg_class);
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
	return ret;
}
561 | ||
562 | MODULE_AUTHOR("Jens Axboe"); | |
563 | MODULE_DESCRIPTION(BSG_DESCRIPTION); | |
564 | MODULE_LICENSE("GPL"); | |
565 | ||
566 | device_initcall(bsg_init); |