/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */

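/*
 * Illustrative userspace sketch (not part of this file; the device node
 * name is hypothetical): issuing a SCSI command synchronously through a
 * bsg node via the SG_IO ioctl handled below.
 *
 *	unsigned char cdb[6] = { 0 };		// TEST UNIT READY
 *	unsigned char sense[32];
 *	struct sg_io_v4 hdr = { 0 };
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *
 *	hdr.guard = 'Q';			// checked in bsg_validate_sgv4_hdr()
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (__u64)(unsigned long)cdb;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.response = (__u64)(unsigned long)sense;
 *	hdr.max_response_len = sizeof(sense);
 *	ioctl(fd, SG_IO, &hdr);
 */
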
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
};

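/*
 * Free a command and release its slot in the per-device queue; wake up
 * anyone polling for queue space (see bsg_poll()).
 */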
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

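/*
 * Reserve a command slot, bounded by bd->max_queue. The slot is
 * reserved under bd->lock before the (possibly sleeping) slab
 * allocation, and given back if the allocation fails.
 */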
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

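/*
 * Copy the CDB in from userspace (allocating a larger buffer for
 * commands longer than BLK_MAX_CDB), check permissions, and set up the
 * request timeout from the header.
 */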
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t mode)
{
	struct scsi_request *req = scsi_req(rq);

	if (hdr->request_len > BLK_MAX_CDB) {
		req->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!req->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(req->cmd, (void __user *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(req->cmd, mode))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	req->cmd_len = hdr->request_len;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*op = hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN;
	return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t mode)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret;
	unsigned int op, dxfer_len;
	void __user *dxferp = NULL;
	struct bsg_class_device *bcd = &q->bsg_dev;

	/* if the LLD has been removed then the bsg_unregister_queue will
	 * eventually be called and the class_dev was freed, so we can no
	 * longer use this request_queue. Return no such address.
	 */
	if (!bcd->class_dev)
		return ERR_PTR(-ENXIO);

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(hdr, &op);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, op, GFP_KERNEL);
	if (IS_ERR(rq))
		return rq;

	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, mode);
	if (ret)
		goto out;

	if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			next_rq = NULL;
			goto out;
		}
		rq->next_rq = next_rq;

		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	return rq;
out:
	scsi_req_free_cmd(scsi_req(rq));
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, blk_status_t status)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p\n",
		bd->name, rq, bc, bc->bio);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

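/*
 * Pop the oldest completed command off the done list, or return NULL
 * if nothing has completed yet.
 */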
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	struct scsi_request *req = scsi_req(rq);
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, req->result);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = req->result & 0xff;
	hdr->transport_status = host_byte(req->result);
	hdr->driver_status = driver_byte(req->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (req->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
				req->sense_len);

		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
				   req->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = req->resid_len;
		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = req->resid_len;
	else
		hdr->dout_resid = req->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && req->result < 0)
		ret = req->result;

	blk_rq_unmap_user(bio);
	scsi_req_free_cmd(req);
	blk_put_request(rq);

	return ret;
}

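/*
 * True once every queued command has completed. In non-blocking mode
 * this re-checks the counters in a loop instead of sleeping, so
 * bsg_complete_all_commands() can use it as an io_wait_event()
 * condition either way.
 */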
static bool bsg_complete(struct bsg_device *bd)
{
	bool ret = false;
	bool spin;

	do {
		spin_lock_irq(&bd->lock);

		BUG_ON(bd->done_cmds > bd->queued_cmds);

		/*
		 * All commands consumed.
		 */
		if (bd->done_cmds == bd->queued_cmds)
			ret = true;

		spin = !test_bit(BSG_F_BLOCK, &bd->flags);

		spin_unlock_irq(&bd->lock);
	} while (!ret && spin);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	io_wait_event(bd->wq_done, bsg_complete(bd));

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

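/*
 * Illustrative userspace sketch (not part of this driver): the
 * asynchronous interface queues one struct sg_io_v4 per write() and
 * reaps the completed header with read(), e.g.
 *
 *	struct sg_io_v4 hdr = { .guard = 'Q', ... };
 *
 *	write(fd, &hdr, sizeof(hdr));	// queued via __bsg_write()
 *	read(fd, &hdr, sizeof(hdr));	// reaped via __bsg_read()
 *
 * Both paths require count to be a multiple of sizeof(struct sg_io_v4).
 */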
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

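/*
 * Mirror O_NONBLOCK into BSG_F_BLOCK so the read/write paths can test
 * a device flag instead of the file flags.
 */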
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}

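/*
 * Queue up to count / sizeof(struct sg_io_v4) commands. On error the
 * commands queued so far stay in flight, and the caller decides
 * whether to report the error or the bytes written.
 */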
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written, fmode_t mode)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, mode);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %zd bytes\n", bd->name, count);

	if (unlikely(uaccess_kernel()))
		return -EINVAL;

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	dprintk("%s: returning %zd\n", bd->name, bytes_written);
	return bytes_written;
}

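/*
 * Allocate and initialize a bsg_device; the caller attaches it to a
 * request queue and the open-device hash.
 */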
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

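/*
 * Drop a reference to the device; the last put reaps any outstanding
 * commands, frees the bsg_device and releases the queue.
 */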
static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif

	if (!blk_queue_scsi_passthrough(rq)) {
		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
		return ERR_PTR(-EINVAL);
	}

	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

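/*
 * Look up an already-open bsg_device for this queue in the minor hash,
 * taking a reference if found.
 */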
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

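/*
 * POLLIN when completed commands can be reaped with read(), POLLOUT
 * while there is room to queue more with write().
 */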
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

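/*
 * Handle the bsg-specific queue depth ioctls, pass the common SCSI/sg
 * ioctls through to scsi_cmd_ioctl(), and implement the synchronous
 * SG_IO path.
 */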
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
	/*
	 * our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	default:
		return -ENOTTY;
	}
}

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

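/*
 * Register a request queue with the bsg layer: allocate a minor,
 * create the class device under /dev/bsg and link it from the queue's
 * sysfs directory. Transport drivers call this; stacked (bio-based)
 * devices are silently skipped.
 */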
int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!queue_is_rq_based(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
					   sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);